diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 7e0ce0734b..34fb318911 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -98,6 +98,8 @@ import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.api.WMNullablePool; import org.apache.hadoop.hive.metastore.api.WriteEventInfo; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -1377,6 +1379,16 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throw new RuntimeException("unimplemented"); } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) { + throw new RuntimeException("unimplemented"); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest) { + throw new RuntimeException("unimplemented"); + } + @Override public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws MetaException, NoSuchObjectException { throw new RuntimeException("unimplemented"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 641df005ed..169fed857d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -69,6 +69,9 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.repl.load.EventDumpDirComparator; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -429,8 +432,17 @@ private Task getReplLoadRootTask(String sourceDb, String replicadb, boolean isIn HiveConf confTemp = new HiveConf(); confTemp.set("hive.repl.enable.move.optimization", "true"); Path loadPath = new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + ReplicationMetricCollector metricCollector; + if (isIncrementalDump) { + metricCollector = new IncrementalLoadMetricCollector(replicadb, tuple.dumpLocation, 0, + confTemp); + } else { + metricCollector = new BootstrapLoadMetricCollector(replicadb, tuple.dumpLocation, 0, + confTemp); + } ReplLoadWork replLoadWork = new ReplLoadWork(confTemp, loadPath.toString(), sourceDb, replicadb, - null, null, isIncrementalDump, Long.valueOf(tuple.lastReplId)); + null, null, isIncrementalDump, Long.valueOf(tuple.lastReplId), + 0L, metricCollector); Task replLoadTask = TaskFactory.get(replLoadWork, confTemp); replLoadTask.initialize(null, null, new TaskQueue(driver.getContext()), driver.getContext()); replLoadTask.executeTask(null); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java index a13d842183..2e1e5e0544 100644 --- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java @@ -26,11 +26,20 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplAck; import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.MetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Before; import org.junit.After; +import org.junit.Assert; import org.junit.Test; import org.junit.BeforeClass; import org.junit.Ignore; @@ -41,6 +50,8 @@ import java.util.Base64; import java.util.Map; import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; /** @@ -172,7 +183,12 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable { public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { // Bootstrap String withClause = " WITH('" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname - + "'='/replica_external_base')"; + + "'='/replica_external_base', '" + HiveConf.ConfVars.REPL_INCLUDE_AUTHORIZATION_METADATA + + "' = 'true' ,'" + HiveConf.ConfVars.REPL_INCLUDE_ATLAS_METADATA + "' = 'true' , '" + + HiveConf.ConfVars.HIVE_IN_TEST + "' = 'true'" + ",'"+ HiveConf.ConfVars.REPL_ATLAS_ENDPOINT + + "' = 
'http://localhost:21000/atlas'" + ",'"+ HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB + "' = 'tgt'" + + ",'"+ HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME + "' = 'cluster0'" + + ",'"+ HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME + "' = 'cluster1')"; primary.run("use " + primaryDbName) .run("create external table t2 (id int)") .run("insert into t2 values(1)") @@ -183,7 +199,7 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next), true); primary.run("create scheduled query s1_t2 every 5 seconds as repl dump " + primaryDbName + withClause); replica.run("create scheduled query s2_t2 every 5 seconds as repl load " + primaryDbName + " INTO " - + replicatedDbName); + + replicatedDbName + withClause); Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); @@ -196,7 +212,20 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { .verifyResult("t2") .run("select id from t2 order by id") .verifyResults(new String[]{"1", "2"}); - + long lastReplId = Long.parseLong(primary.status(replicatedDbName).getOutput().get(0)); + DumpMetaData dumpMetaData = new DumpMetaData(ackPath.getParent(), primary.hiveConf); + List replicationMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(2, replicationMetrics.size()); + //Generate expected metrics + List expectedReplicationMetrics = new ArrayList<>(); + expectedReplicationMetrics.add(generateExpectedMetric("s1_t2", 0, primaryDbName, + Metadata.ReplicationType.BOOTSTRAP, ackPath.getParent().toString(), lastReplId, Status.SUCCESS, + generateDumpStages(true))); + expectedReplicationMetrics.add(generateExpectedMetric("s2_t2", + dumpMetaData.getDumpExecutionId(), replicatedDbName, + Metadata.ReplicationType.BOOTSTRAP, 
ackPath.getParent().toString(), lastReplId, Status.SUCCESS, + generateLoadStages(true))); + checkMetrics(expectedReplicationMetrics, replicationMetrics); // First incremental, after bootstrap primary.run("use " + primaryDbName) .run("insert into t2 values(3)") @@ -215,6 +244,130 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { replica.run("drop scheduled query s2_t2"); } } + + private void checkMetrics(List expectedReplicationMetrics, + List actualMetrics) { + Assert.assertEquals(expectedReplicationMetrics.size(), actualMetrics.size()); + int metricCounter = 0; + for (ReplicationMetric actualMetric : actualMetrics) { + for (ReplicationMetric expecMetric : expectedReplicationMetrics) { + if (actualMetric.getPolicy().equalsIgnoreCase(expecMetric.getPolicy())) { + Assert.assertEquals(expecMetric.getDumpExecutionId(), actualMetric.getDumpExecutionId()); + Assert.assertEquals(expecMetric.getMetadata().getDbName(), actualMetric.getMetadata().getDbName()); + Assert.assertEquals(expecMetric.getMetadata().getLastReplId(), + actualMetric.getMetadata().getLastReplId()); + Assert.assertEquals(expecMetric.getMetadata().getStagingDir(), + actualMetric.getMetadata().getStagingDir()); + Assert.assertEquals(expecMetric.getMetadata().getReplicationType(), + actualMetric.getMetadata().getReplicationType()); + Assert.assertEquals(expecMetric.getProgress().getStatus(), actualMetric.getProgress().getStatus()); + Assert.assertEquals(expecMetric.getProgress().getStages().size(), + actualMetric.getProgress().getStages().size()); + List expectedStages = expecMetric.getProgress().getStages(); + List actualStages = actualMetric.getProgress().getStages(); + int counter = 0; + for (Stage actualStage : actualStages) { + for (Stage expeStage : expectedStages) { + if (actualStage.getName().equalsIgnoreCase(expeStage.getName())) { + Assert.assertEquals(expeStage.getStatus(), actualStage.getStatus()); + Assert.assertEquals(expeStage.getMetrics().size(), 
actualStage.getMetrics().size()); + for (Metric actMetric : actualStage.getMetrics()) { + for (Metric expMetric : expeStage.getMetrics()) { + if (actMetric.getName().equalsIgnoreCase(expMetric.getName())) { + Assert.assertEquals(expMetric.getTotalCount(), actMetric.getTotalCount()); + Assert.assertEquals(expMetric.getCurrentCount(), actMetric.getCurrentCount()); + } + } + } + counter++; + if (counter == actualStages.size()) { + break; + } + } + } + } + metricCounter++; + if (metricCounter == actualMetrics.size()) { + break; + } + } + } + } + } + + private List generateLoadStages(boolean isBootstrap) { + List stages = new ArrayList<>(); + //Ranger + Stage rangerDump = new Stage("RANGER_LOAD", Status.SUCCESS, 0); + Metric rangerMetric = new Metric(ReplUtils.MetricName.POLICIES.name(), 0); + rangerDump.addMetric(rangerMetric); + stages.add(rangerDump); + //Atlas + Stage atlasDump = new Stage("ATLAS_LOAD", Status.SUCCESS, 0); + Metric atlasMetric = new Metric(ReplUtils.MetricName.ENTITIES.name(), 0); + atlasDump.addMetric(atlasMetric); + stages.add(atlasDump); + //Hive + Stage replDump = new Stage("REPL_LOAD", Status.SUCCESS, 0); + if (isBootstrap) { + Metric hiveMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + hiveMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 0); + replDump.addMetric(hiveMetric); + } else { + Metric hiveMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + } + stages.add(replDump); + return stages; + } + + private List generateDumpStages(boolean isBootstrap) { + List stages = new ArrayList<>(); + //Ranger + Stage rangerDump = new Stage("RANGER_DUMP", Status.SUCCESS, 0); + Metric rangerMetric = new Metric(ReplUtils.MetricName.POLICIES.name(), 0); + rangerDump.addMetric(rangerMetric); + stages.add(rangerDump); + //Atlas + Stage atlasDump = new Stage("ATLAS_DUMP", Status.SUCCESS, 0); + 
Metric atlasMetric = new Metric(ReplUtils.MetricName.ENTITIES.name(), 0); + atlasDump.addMetric(atlasMetric); + stages.add(atlasDump); + //Hive + Stage replDump = new Stage("REPL_DUMP", Status.SUCCESS, 0); + if (isBootstrap) { + Metric hiveMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + hiveMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 0); + replDump.addMetric(hiveMetric); + } else { + Metric hiveMetric = new Metric(ReplUtils.MetricName.EVENTS.name(), 1); + hiveMetric.setCurrentCount(1); + replDump.addMetric(hiveMetric); + } + stages.add(replDump); + return stages; + } + + private ReplicationMetric generateExpectedMetric(String policy, long dumpExecId, String dbName, + Metadata.ReplicationType replicationType, String staging, + long lastReplId, Status status, List stages) { + Metadata metadata = new Metadata(dbName, replicationType, staging); + metadata.setLastReplId(lastReplId); + ReplicationMetric replicationMetric = new ReplicationMetric(0, policy, dumpExecId, metadata); + Progress progress = new Progress(); + progress.setStatus(status); + for (Stage stage : stages) { + progress.addStage(stage); + } + replicationMetric.setProgress(progress); + return replicationMetric; + } + private void waitForAck(FileSystem fs, Path ackFile, long timeout) throws IOException { long oldTime = System.currentTimeMillis(); long sleepInterval = 2; diff --git a/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql b/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql new file mode 100644 index 0000000000..19a92fd0ad --- /dev/null +++ b/metastore/scripts/upgrade/derby/058-HIVE-23516.derby.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "APP"."REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY 
KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql b/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql new file mode 100644 index 0000000000..732fec4cdd --- /dev/null +++ b/metastore/scripts/upgrade/mssql/035-HIVE-23516.mssql.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(max), + "RM_PROGRESS" varchar(max), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql b/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql new file mode 100644 index 0000000000..98f470b061 --- /dev/null +++ b/metastore/scripts/upgrade/mysql/050-HIVE-23516.mysql.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql 
b/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql new file mode 100644 index 0000000000..7a8f2927d7 --- /dev/null +++ b/metastore/scripts/upgrade/oracle/050-HIVE-23516.oracle.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" number NOT NULL, + "RM_POLICY" varchar2(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" number NOT NULL, + "RM_METADATA" varchar2(4000), + "RM_PROGRESS" varchar2(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); \ No newline at end of file diff --git a/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql b/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql new file mode 100644 index 0000000000..98f470b061 --- /dev/null +++ b/metastore/scripts/upgrade/postgres/049-HIVE-23516.postgres.sql @@ -0,0 +1,13 @@ +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java index be48f99c59..502dabbd06 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.repl; +import 
com.google.common.annotations.VisibleForTesting; import org.apache.atlas.model.impexp.AtlasExportRequest; import org.apache.atlas.model.impexp.AtlasServer; import org.apache.atlas.model.instance.AtlasObjectId; @@ -35,6 +36,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.dump.log.AtlasDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,11 +49,12 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Map; +import java.util.HashMap; import java.util.Set; +import java.util.List; +import java.util.Arrays; +import java.util.ArrayList; /** * Atlas Metadata Replication Dump Task. @@ -62,6 +65,17 @@ private static final long serialVersionUID = 1L; private transient AtlasRestClient atlasRestClient; + public AtlasDumpTask() { + super(); + } + + @VisibleForTesting + AtlasDumpTask(final AtlasRestClient atlasRestClient, final HiveConf conf, final AtlasDumpWork work) { + this.conf = conf; + this.work = work; + this.atlasRestClient = atlasRestClient; + } + @Override public int execute() { try { @@ -71,6 +85,9 @@ public int execute() { AtlasDumpLogger replLogger = new AtlasDumpLogger(atlasReplInfo.getSrcDB(), atlasReplInfo.getStagingDir().toString()); replLogger.startLog(); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.ENTITIES.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); atlasRestClient = new AtlasRestClientBuilder(atlasReplInfo.getAtlasEndpoint()) .getClient(atlasReplInfo.getConf()); AtlasRequestBuilder atlasRequestBuilder = new AtlasRequestBuilder(); @@ -81,15 +98,21 @@ public int execute() { LOG.debug("Finished dumping atlas 
metadata, total:{} bytes written", numBytesWritten); createDumpMetadata(atlasReplInfo, currentModifiedTime); replLogger.endLog(0L); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Exception while dumping atlas metadata", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } - public AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedURLException { + private AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedURLException { String errorFormat = "%s is mandatory config for Atlas metadata replication"; //Also validates URL for endpoint. String endpoint = new URL(ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, conf, errorFormat)) @@ -105,7 +128,7 @@ public AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedUR return atlasReplInfo; } - public long lastStoredTimeStamp() throws SemanticException { + private long lastStoredTimeStamp() throws SemanticException { Path prevMetadataPath = new Path(work.getPrevAtlasDumpDir(), EximUtil.METADATA_NAME); BufferedReader br = null; try { @@ -138,7 +161,7 @@ private long getCurrentTimestamp(AtlasReplInfo atlasReplInfo, String entityGuid) return ret; } - public long dumpAtlasMetaData(AtlasRequestBuilder atlasRequestBuilder, AtlasReplInfo atlasReplInfo) + long dumpAtlasMetaData(AtlasRequestBuilder atlasRequestBuilder, AtlasReplInfo atlasReplInfo) throws SemanticException { InputStream inputStream = null; long numBytesWritten = 0L; @@ -146,7 +169,7 @@ public long dumpAtlasMetaData(AtlasRequestBuilder atlasRequestBuilder, AtlasRepl AtlasExportRequest exportRequest = atlasRequestBuilder.createExportRequest(atlasReplInfo, atlasReplInfo.getSrcCluster()); inputStream = 
atlasRestClient.exportData(exportRequest); - FileSystem fs = FileSystem.get(atlasReplInfo.getStagingDir().toUri(), atlasReplInfo.getConf()); + FileSystem fs = atlasReplInfo.getStagingDir().getFileSystem(atlasReplInfo.getConf()); Path exportFilePath = new Path(atlasReplInfo.getStagingDir(), ReplUtils.REPL_ATLAS_EXPORT_FILE_NAME); numBytesWritten = Utils.writeFile(fs, exportFilePath, inputStream); } catch (SemanticException ex) { @@ -181,7 +204,7 @@ private String checkHiveEntityGuid(AtlasRequestBuilder atlasRequestBuilder, Stri return guid; } - public void createDumpMetadata(AtlasReplInfo atlasReplInfo, long lastModifiedTime) throws SemanticException { + void createDumpMetadata(AtlasReplInfo atlasReplInfo, long lastModifiedTime) throws SemanticException { Path dumpFile = new Path(atlasReplInfo.getStagingDir(), EximUtil.METADATA_NAME); List> listValues = new ArrayList<>(); listValues.add( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java index 3344152f43..3f10730be4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -34,13 +35,16 @@ private final Path stagingDir; private final boolean bootstrap; private final Path prevAtlasDumpDir; + private final transient ReplicationMetricCollector metricCollector; - public AtlasDumpWork(String srcDB, Path stagingDir, boolean bootstrap, Path prevAtlasDumpDir) { + public AtlasDumpWork(String srcDB, Path stagingDir, boolean bootstrap, Path prevAtlasDumpDir, + ReplicationMetricCollector metricCollector) { this.srcDB = srcDB; this.stagingDir = stagingDir; 
this.bootstrap = bootstrap; this.prevAtlasDumpDir = prevAtlasDumpDir; + this.metricCollector = metricCollector; } public boolean isBootstrap() { @@ -58,4 +62,8 @@ public String getSrcDB() { public Path getStagingDir() { return stagingDir; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java index fa18bf3236..534c85da32 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec.repl; +import com.google.common.annotations.VisibleForTesting; import org.apache.atlas.model.impexp.AtlasImportRequest; import org.apache.atlas.model.impexp.AtlasImportResult; import org.apache.hadoop.fs.FileSystem; @@ -31,8 +32,8 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.SemanticException; - import org.apache.hadoop.hive.ql.parse.repl.load.log.AtlasLoadLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +45,8 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.Map; /** * Atlas Metadata Replication Load Task. 
@@ -52,10 +55,23 @@ private static final long serialVersionUID = 1L; private static final transient Logger LOG = LoggerFactory.getLogger(AtlasLoadTask.class); + public AtlasLoadTask() { + super(); + } + + @VisibleForTesting + AtlasLoadTask(final HiveConf conf, final AtlasLoadWork work) { + this.conf = conf; + this.work = work; + } + @Override public int execute() { try { AtlasReplInfo atlasReplInfo = createAtlasReplInfo(); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.ENTITIES.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); LOG.info("Loading atlas metadata from srcDb: {} to tgtDb: {} from staging: {}", atlasReplInfo.getSrcDB(), atlasReplInfo.getTgtDB(), atlasReplInfo.getStagingDir()); AtlasLoadLogger replLogger = new AtlasLoadLogger(atlasReplInfo.getSrcDB(), atlasReplInfo.getTgtDB(), @@ -63,16 +79,23 @@ public int execute() { replLogger.startLog(); int importCount = importAtlasMetadata(atlasReplInfo); replLogger.endLog(importCount); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.ENTITIES.name(), importCount); LOG.info("Atlas entities import count {}", importCount); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Exception while loading atlas metadata", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } - public AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedURLException { + AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedURLException { String errorFormat = "%s is mandatory config for Atlas metadata replication"; //Also validates URL for endpoint. 
String endpoint = new URL(ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, conf, errorFormat)) @@ -111,7 +134,7 @@ private String getStoredFsUri(Path atlasDumpDir) throws SemanticException { } } - public int importAtlasMetadata(AtlasReplInfo atlasReplInfo) throws Exception { + private int importAtlasMetadata(AtlasReplInfo atlasReplInfo) throws Exception { AtlasRequestBuilder atlasRequestBuilder = new AtlasRequestBuilder(); AtlasImportRequest importRequest = atlasRequestBuilder.createImportRequest( atlasReplInfo.getSrcDB(), atlasReplInfo.getTgtDB(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java index 4dc1ea81a6..817c214675 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadWork.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -33,11 +34,13 @@ private final String srcDB; private final String tgtDB; private final Path stagingDir; + private final transient ReplicationMetricCollector metricCollector; - public AtlasLoadWork(String srcDB, String tgtDB, Path stagingDir) { + public AtlasLoadWork(String srcDB, String tgtDB, Path stagingDir, ReplicationMetricCollector metricCollector) { this.srcDB = srcDB; this.tgtDB = tgtDB; this.stagingDir = stagingDir; + this.metricCollector = metricCollector; } public static long getSerialVersionUID() { @@ -55,4 +58,8 @@ public String getTgtDB() { public Path getStagingDir() { return stagingDir; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java index 5a56a6be95..92ca6ea6ed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java @@ -33,13 +33,16 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.dump.log.RangerDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Serializable; import java.net.URL; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * RangerDumpTask. @@ -77,6 +80,11 @@ public int execute() { long exportCount = 0; Path filePath = null; LOG.info("Exporting Ranger Metadata"); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.POLICIES.name(), 0L); + work.getMetricCollector().reportStageStart(getName(), metricMap); + replLogger = new RangerDumpLogger(work.getDbName(), work.getCurrentDumpPath().toString()); + replLogger.startLog(); if (rangerRestClient == null) { rangerRestClient = getRangerRestClient(); } @@ -91,8 +99,6 @@ public int execute() { if (StringUtils.isEmpty(rangerEndpoint) || !rangerRestClient.checkConnection(rangerEndpoint)) { throw new SemanticException("Ranger endpoint is not valid " + rangerEndpoint); } - replLogger = new RangerDumpLogger(work.getDbName(), work.getCurrentDumpPath().toString()); - replLogger.startLog(); RangerExportPolicyList rangerExportPolicyList = rangerRestClient.exportRangerPolicies(rangerEndpoint, work.getDbName(), rangerHiveServiceName); List rangerPolicies = rangerExportPolicyList.getPolicies(); @@ -109,15 +115,22 @@ public int execute() { if (filePath != null) { LOG.info("Ranger policy export finished successfully"); exportCount = rangerExportPolicyList.getListSize(); + 
work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.POLICIES.name(), exportCount); } } replLogger.endLog(exportCount); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); LOG.debug("Ranger policy export filePath:" + filePath); LOG.info("Number of ranger policies exported {}", exportCount); return 0; } catch (Exception e) { LOG.error("failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java index 026402b43e..b1393b20d5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpWork.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import java.io.Serializable; @@ -36,10 +37,12 @@ private static final long serialVersionUID = 1L; private Path currentDumpPath; private String dbName; + private final transient ReplicationMetricCollector metricCollector; - public RangerDumpWork(Path currentDumpPath, String dbName) { + public RangerDumpWork(Path currentDumpPath, String dbName, ReplicationMetricCollector metricCollector) { this.currentDumpPath = currentDumpPath; this.dbName = dbName; + this.metricCollector = metricCollector; } public Path getCurrentDumpPath() { @@ -53,4 +56,8 @@ public String getDbName() { URL getRangerConfigResource() { return getClass().getClassLoader().getResource(ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME); } + + public ReplicationMetricCollector getMetricCollector() { + return 
metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java index 4e8a44fdae..fa57efd2fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.load.log.RangerLoadLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +41,9 @@ import java.io.Serializable; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_RANGER_ADD_DENY_POLICY_TARGET; /** @@ -101,6 +104,9 @@ public int execute() { replLogger = new RangerLoadLogger(work.getSourceDbName(), work.getTargetDbName(), work.getCurrentDumpPath().toString(), expectedPolicyCount); replLogger.startLog(); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.POLICIES.name(), (long) expectedPolicyCount); + work.getMetricCollector().reportStageStart(getName(), metricMap); if (rangerExportPolicyList != null && !CollectionUtils.isEmpty(rangerExportPolicyList.getPolicies())) { rangerPolicies = rangerExportPolicyList.getPolicies(); } @@ -129,13 +135,20 @@ public int execute() { rangerHiveServiceName); LOG.info("Number of ranger policies imported {}", rangerExportPolicyList.getListSize()); importCount = rangerExportPolicyList.getListSize(); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.POLICIES.name(), importCount); replLogger.endLog(importCount); LOG.info("Ranger policy import finished {} ", importCount); } + 
work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS); return 0; } catch (Exception e) { LOG.error("Failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java index cddca6076a..f42575b85d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadWork.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,11 +41,14 @@ private Path currentDumpPath; private String targetDbName; private String sourceDbName; + private final transient ReplicationMetricCollector metricCollector; - public RangerLoadWork(Path currentDumpPath, String sourceDbName, String targetDbName) { + public RangerLoadWork(Path currentDumpPath, String sourceDbName, String targetDbName, + ReplicationMetricCollector metricCollector) { this.currentDumpPath = currentDumpPath; this.targetDbName = targetDbName; this.sourceDbName = sourceDbName; + this.metricCollector = metricCollector; } public Path getCurrentDumpPath() { @@ -62,4 +66,8 @@ public String getSourceDbName() { URL getRangerConfigResource() { return getClass().getClassLoader().getResource(ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME); } + + ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 046b6a00de..f21fb7d3dd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.ReplChangeManager; @@ -71,7 +72,11 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter; import org.apache.hadoop.hive.ql.parse.repl.dump.log.BootstrapDumpLogger; import org.apache.hadoop.hive.ql.parse.repl.dump.log.IncrementalDumpLogger; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.ExportWork.MmContext; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.thrift.TException; @@ -97,6 +102,8 @@ import java.util.LinkedList; import java.util.UUID; import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Writer; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; @@ -147,6 +154,7 @@ public int execute() { Path currentDumpPath = getCurrentDumpPath(dumpRoot, isBootstrap); Path hiveDumpRoot = new Path(currentDumpPath, ReplUtils.REPL_HIVE_BASE_DIR); work.setCurrentDumpPath(currentDumpPath); + 
work.setMetricCollector(initMetricCollection(isBootstrap, hiveDumpRoot)); if (shouldDumpAtlasMetadata()) { addAtlasDumpTask(isBootstrap, previousValidHiveDumpPath); LOG.info("Added task to dump atlas metadata."); @@ -174,6 +182,11 @@ public int execute() { } catch (Exception e) { LOG.error("failed", e); setException(e); + try { + work.getMetricCollector().reportStageEnd(getName(), Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } return 0; @@ -183,7 +196,8 @@ private void initiateAuthorizationDumpTask() throws SemanticException { if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) { Path rangerDumpRoot = new Path(work.getCurrentDumpPath(), ReplUtils.REPL_RANGER_BASE_DIR); LOG.info("Exporting Authorization Metadata from {} at {} ", RANGER_AUTHORIZER, rangerDumpRoot); - RangerDumpWork rangerDumpWork = new RangerDumpWork(rangerDumpRoot, work.dbNameOrPattern); + RangerDumpWork rangerDumpWork = new RangerDumpWork(rangerDumpRoot, work.dbNameOrPattern, + work.getMetricCollector()); Task rangerDumpTask = TaskFactory.get(rangerDumpWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -240,7 +254,8 @@ private void addAtlasDumpTask(boolean bootstrap, Path prevHiveDumpDir) { Path atlasDumpDir = new Path(work.getCurrentDumpPath(), ReplUtils.REPL_ATLAS_BASE_DIR); Path prevAtlasDumpDir = prevHiveDumpDir == null ? 
null : new Path(prevHiveDumpDir.getParent(), ReplUtils.REPL_ATLAS_BASE_DIR); - AtlasDumpWork atlasDumpWork = new AtlasDumpWork(work.dbNameOrPattern, atlasDumpDir, bootstrap, prevAtlasDumpDir); + AtlasDumpWork atlasDumpWork = new AtlasDumpWork(work.dbNameOrPattern, atlasDumpDir, bootstrap, prevAtlasDumpDir, + work.getMetricCollector()); Task atlasDumpTask = TaskFactory.get(atlasDumpWork, conf); childTasks = new ArrayList<>(); childTasks.add(atlasDumpTask); @@ -253,6 +268,7 @@ private void finishRemainingTasks() throws SemanticException { + ReplAck.DUMP_ACKNOWLEDGEMENT.toString()); Utils.create(dumpAckFile, conf); prepareReturnValues(work.getResultValues()); + work.getMetricCollector().reportEnd(Status.SUCCESS); deleteAllPreviousDumpMeta(work.getCurrentDumpPath()); } @@ -449,9 +465,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive long bootDumpBeginReplId = -1; List managedTableCopyPaths = Collections.emptyList(); List extTableCopyWorks = Collections.emptyList(); - List tableList = work.replScope.includeAllTables() ? null : new ArrayList<>(); - // If we are bootstrapping ACID tables, we need to perform steps similar to a regular // bootstrap (See bootstrapDump() for more details. Only difference here is instead of // waiting for the concurrent transactions to finish, we start dumping the incremental events @@ -465,29 +479,21 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive HiveConf.ConfVars.REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT, TimeUnit.MILLISECONDS); waitUntilTime = System.currentTimeMillis() + timeoutInMs; } - // TODO : instead of simply restricting by message format, we should eventually // move to a jdbc-driver-stype registering of message format, and picking message // factory per event to decode. For now, however, since all messages have the // same factory, restricting by message format is effectively a guard against // older leftover data that would cause us problems. 
- work.overrideLastEventToDump(hiveDb, bootDumpBeginReplId); - IMetaStoreClient.NotificationFilter evFilter = new AndFilter( new ReplEventFilter(work.replScope), new EventBoundaryFilter(work.eventFrom, work.eventTo)); - EventUtils.MSClientNotificationFetcher evFetcher = new EventUtils.MSClientNotificationFetcher(hiveDb); - - int maxEventLimit = getMaxEventAllowed(work.maxEventLimit()); EventUtils.NotificationEventIterator evIter = new EventUtils.NotificationEventIterator( evFetcher, work.eventFrom, maxEventLimit, evFilter); - lastReplId = work.eventTo; - Path ackFile = new Path(dumpRoot, ReplAck.EVENTS_DUMP.toString()); long resumeFrom = Utils.fileExists(ackFile, conf) ? getResumeFrom(ackFile) : work.eventFrom; @@ -499,10 +505,14 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive String dbName = (null != work.dbNameOrPattern && !work.dbNameOrPattern.isEmpty()) ? work.dbNameOrPattern : "?"; - replLogger = new IncrementalDumpLogger(dbName, dumpRoot.toString(), - evFetcher.getDbNotificationEventsCount(work.eventFrom, dbName, work.eventTo, maxEventLimit), + long estimatedNumEvents = evFetcher.getDbNotificationEventsCount(work.eventFrom, dbName, work.eventTo, + maxEventLimit); + replLogger = new IncrementalDumpLogger(dbName, dumpRoot.toString(), estimatedNumEvents, work.eventFrom, work.eventTo, maxEventLimit); replLogger.startLog(); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.EVENTS.name(), estimatedNumEvents); + work.getMetricCollector().reportStageStart(getName(), metricMap); long dumpedCount = resumeFrom - work.eventFrom; if (dumpedCount > 0) { LOG.info("Event id {} to {} are already dumped, skipping {} events", work.eventFrom, resumeFrom, dumpedCount); @@ -518,19 +528,16 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive dumpEvent(ev, evRoot, dumpRoot, cmRoot, hiveDb); Utils.writeOutput(String.valueOf(lastReplId), ackFile, conf); } - replLogger.endLog(lastReplId.toString()); 
- LOG.info("Done dumping events, preparing to return {},{}", dumpRoot.toUri(), lastReplId); - dmd.setDump(DumpType.INCREMENTAL, work.eventFrom, lastReplId, cmRoot); - + long executionId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L); + dmd.setDump(DumpType.INCREMENTAL, work.eventFrom, lastReplId, cmRoot, executionId); // If repl policy is changed (oldReplScope is set), then pass the current replication policy, // so that REPL LOAD would drop the tables which are not included in current policy. if (work.oldReplScope != null) { dmd.setReplScope(work.replScope); } dmd.write(true); - // Examine all the tables if required. if (shouldExamineTablesToDump() || (tableList != null)) { // If required wait more for any transactions open at the time of starting the ACID bootstrap. @@ -538,7 +545,6 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive assert (waitUntilTime > 0); validTxnList = getValidTxnListForReplDump(hiveDb, waitUntilTime); } - /* When same dump dir is resumed because of check-pointing, we need to clear the existing metadata. We need to rewrite the metadata as the write id list will be changed. We can't reuse the previous write id as it might be invalid due to compaction. 
*/ @@ -587,9 +593,20 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive } work.setDirCopyIterator(extTableCopyWorks.iterator()); work.setManagedTableCopyPathIterator(managedTableCopyPaths.iterator()); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS, lastReplId); return lastReplId; } + private ReplicationMetricCollector initMetricCollection(boolean isBootstrap, Path dumpRoot) { + ReplicationMetricCollector collector; + if (isBootstrap) { + collector = new BootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf); + } else { + collector = new IncrementalDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf); + } + return collector; + } + private int getMaxEventAllowed(int currentEventMaxLimit) { int maxDirItems = Integer.parseInt(conf.get(ReplUtils.DFS_MAX_DIR_ITEMS_CONFIG, "0")); if (maxDirItems > 0) { @@ -603,7 +620,6 @@ private int getMaxEventAllowed(int currentEventMaxLimit) { } return currentEventMaxLimit; } - private void cleanFailedEventDirIfExists(Path dumpDir, long resumeFrom) throws IOException { Path nextEventRoot = new Path(dumpDir, String.valueOf(resumeFrom + 1)); Retry retriable = new Retry(IOException.class) { @@ -674,6 +690,7 @@ private void dumpEvent(NotificationEvent ev, Path evRoot, Path dumpRoot, Path cm ); EventHandler eventHandler = EventHandlerFactory.handlerFor(ev); eventHandler.handle(context); + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.EVENTS.name(), 1); replLogger.eventLog(String.valueOf(ev.getEventId()), eventHandler.dumpType().toString()); } @@ -779,10 +796,16 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) throw new HiveException("Replication dump not allowed for replicated database" + " with first incremental dump pending : " + dbName); } + int estimatedNumTables = Utils.getAllTables(hiveDb, dbName, work.replScope).size(); + int estimatedNumFunctions = 
hiveDb.getAllFunctions().size(); replLogger = new BootstrapDumpLogger(dbName, dumpRoot.toString(), - Utils.getAllTables(hiveDb, dbName, work.replScope).size(), - hiveDb.getAllFunctions().size()); + estimatedNumTables, + estimatedNumFunctions); replLogger.startLog(); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) estimatedNumTables); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) estimatedNumFunctions); + work.getMetricCollector().reportStageStart(getName(), metricMap); Path dbRoot = dumpDbMetadata(dbName, metadataPath, bootDumpBeginReplId, hiveDb); Path dbDataRoot = new Path(new Path(dumpRoot, EximUtil.DATA_PATH_NAME), dbName); dumpFunctionMetadata(dbName, dbRoot, hiveDb); @@ -841,11 +864,13 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) Long bootDumpEndReplId = currentNotificationId(hiveDb); LOG.info("Preparing to return {},{}->{}", dumpRoot.toUri(), bootDumpBeginReplId, bootDumpEndReplId); - dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot); + long executorId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L); + dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot, executorId); dmd.write(true); work.setDirCopyIterator(extTableCopyWorks.iterator()); work.setManagedTableCopyPathIterator(managedTableCopyPaths.iterator()); + work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS, bootDumpBeginReplId); return bootDumpBeginReplId; } @@ -912,7 +937,9 @@ Path dumpDbMetadata(String dbName, Path metadataRoot, long lastReplId, Hive hive MmContext mmCtx = MmContext.createIfNeeded(tableSpec.tableHandle); tuple.replicationSpec.setRepl(true); List managedTableCopyPaths = new TableExport( - exportPaths, tableSpec, tuple.replicationSpec, hiveDb, distCpDoAsUser, conf, mmCtx).write(false); + exportPaths, tableSpec, tuple.replicationSpec, hiveDb, distCpDoAsUser, + conf, mmCtx).write(false); + 
work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.TABLES.name(), 1); replLogger.tableLog(tblName, tableSpec.tableHandle.getTableType()); if (tableSpec.tableHandle.getTableType().equals(TableType.EXTERNAL_TABLE) || Utils.shouldDumpMetaDataOnly(conf)) { @@ -1042,6 +1069,7 @@ void dumpFunctionMetadata(String dbName, Path dbMetadataRoot, Hive hiveDb) throw FunctionSerializer serializer = new FunctionSerializer(tuple.object, conf); serializer.writeTo(jsonWriter, tuple.replicationSpec); } + work.getMetricCollector().reportStageProgress(getName(), ReplUtils.MetricName.FUNCTIONS.name(), 1); replLogger.functionLog(functionName); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java index 86f92338a6..59cae6b9fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.parse.EximUtil; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +57,7 @@ private Path currentDumpPath; private List resultValues; private boolean shouldOverwrite; + private transient ReplicationMetricCollector metricCollector; public static void injectNextDumpDirForTest(String dumpDir) { injectNextDumpDirForTest(dumpDir, false); @@ -190,4 +192,12 @@ public void setResultValues(List resultValues) { public void setShouldOverwrite(boolean shouldOverwrite) { this.shouldOverwrite = shouldOverwrite; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public void setMetricCollector(ReplicationMetricCollector metricCollector) { + this.metricCollector = 
metricCollector; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 792e331884..37cc6cd454 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -54,12 +54,13 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.HiveTableName; -import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.api.StageType; import java.io.IOException; @@ -120,10 +121,20 @@ public int execute() { } } catch (RuntimeException e) { LOG.error("replication failed with run time exception", e); + try { + work.getMetricCollector().reportEnd(Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } throw e; } catch (Exception e) { LOG.error("replication failed", e); setException(e); + try { + work.getMetricCollector().reportEnd(Status.FAILED); + } catch (SemanticException ex) { + LOG.error("Failed to collect Metrics ", ex); + } return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } } @@ -136,7 +147,8 @@ private void initiateAuthorizationLoadTask() throws SemanticException { if (RANGER_AUTHORIZER.equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.REPL_AUTHORIZATION_PROVIDER_SERVICE))) { Path rangerLoadRoot = new Path(new Path(work.dumpDirectory).getParent(), 
ReplUtils.REPL_RANGER_BASE_DIR); LOG.info("Adding Import Ranger Metadata Task from {} ", rangerLoadRoot); - RangerLoadWork rangerLoadWork = new RangerLoadWork(rangerLoadRoot, work.getSourceDbName(), work.dbNameToLoadIn); + RangerLoadWork rangerLoadWork = new RangerLoadWork(rangerLoadRoot, work.getSourceDbName(), work.dbNameToLoadIn, + work.getMetricCollector()); Task rangerLoadTask = TaskFactory.get(rangerLoadWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -151,7 +163,8 @@ private void initiateAuthorizationLoadTask() throws SemanticException { private void addAtlasLoadTask() throws HiveException { Path atlasDumpDir = new Path(new Path(work.dumpDirectory).getParent(), ReplUtils.REPL_ATLAS_BASE_DIR); LOG.info("Adding task to load Atlas metadata from {} ", atlasDumpDir); - AtlasLoadWork atlasLoadWork = new AtlasLoadWork(work.getSourceDbName(), work.dbNameToLoadIn, atlasDumpDir); + AtlasLoadWork atlasLoadWork = new AtlasLoadWork(work.getSourceDbName(), work.dbNameToLoadIn, atlasDumpDir, + work.getMetricCollector()); Task atlasLoadTask = TaskFactory.get(atlasLoadWork, conf); if (childTasks == null) { childTasks = new ArrayList<>(); @@ -228,7 +241,7 @@ a database ( directory ) tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf)); } else { LoadTable loadTable = new LoadTable(tableEvent, loadContext, iterator.replLogger(), tableContext, - loadTaskTracker); + loadTaskTracker, work.getMetricCollector()); tableTracker = loadTable.tasks(work.isIncrementalLoad()); } @@ -254,7 +267,7 @@ a database ( directory ) // for a table we explicitly try to load partitions as there is no separate partitions events. 
LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), loadTaskTracker, tableEvent, - work.dbNameToLoadIn, tableContext); + work.dbNameToLoadIn, tableContext, work.getMetricCollector()); TaskTracker partitionsTracker = loadPartitions.tasks(); partitionsPostProcessing(iterator, scope, loadTaskTracker, tableTracker, partitionsTracker); @@ -321,7 +334,7 @@ private TaskTracker addLoadPartitionTasks(Context loadContext, BootstrapEvent ne TableContext tableContext = new TableContext(dbTracker, work.dbNameToLoadIn); LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), tableContext, loadTaskTracker, - event.asTableEvent(), work.dbNameToLoadIn, event.lastPartitionReplicated()); + event.asTableEvent(), work.dbNameToLoadIn, event.lastPartitionReplicated(), work.getMetricCollector()); /* the tableTracker here should be a new instance and not an existing one as this can only happen when we break in between loading partitions. @@ -348,7 +361,7 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIterator iterator, BootstrapEvent next, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadFunction loadFunction = new LoadFunction(loadContext, iterator.replLogger(), - (FunctionEvent) next, work.dbNameToLoadIn, dbTracker); + (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, work.getMetricCollector()); TaskTracker functionsTracker = loadFunction.tasks(); if (!scope.database) { scope.rootTasks.addAll(functionsTracker.tasks()); @@ -442,7 +455,7 @@ private void createEndReplLogTask(Context context, Scope scope, Database dbInMetadata = work.databaseEvent(context.hiveConf).dbInMetadata(work.dbNameToLoadIn); dbProps = dbInMetadata.getParameters(); } - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps, 
work.getMetricCollector()); Task replLogTask = TaskFactory.get(replLogWork, conf); if (scope.rootTasks.isEmpty()) { scope.rootTasks.add(replLogTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 26cd59b082..43bf365b4f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.EximUtil; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.exec.Task; @@ -45,6 +47,8 @@ final String dumpDirectory; private boolean lastReplIDUpdated; private String sourceDbName; + private Long dumpExecutionId; + private final transient ReplicationMetricCollector metricCollector; private final ConstraintEventsIterator constraintsIterator; private int loadTaskRunCount = 0; @@ -62,12 +66,17 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, String sourceDbName, String dbNameToLoadIn, ReplScope currentReplScope, - LineageState lineageState, boolean isIncrementalDump, Long eventTo) throws IOException { + LineageState lineageState, boolean isIncrementalDump, Long eventTo, + Long dumpExecutionId, + ReplicationMetricCollector metricCollector) throws IOException, SemanticException { sessionStateLineageState = lineageState; this.dumpDirectory = dumpDirectory; this.dbNameToLoadIn = dbNameToLoadIn; this.currentReplScope = currentReplScope; this.sourceDbName = sourceDbName; + this.dumpExecutionId = dumpExecutionId; + this.metricCollector = metricCollector; + // If DB name is 
changed during REPL LOAD, then set it instead of referring to source DB name. if ((currentReplScope != null) && StringUtils.isNotBlank(dbNameToLoadIn)) { @@ -77,7 +86,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, rootTask = null; if (isIncrementalDump) { incrementalLoadTasksBuilder = new IncrementalLoadTasksBuilder(dbNameToLoadIn, dumpDirectory, - new IncrementalLoadEventsIterator(dumpDirectory, hiveConf), hiveConf, eventTo); + new IncrementalLoadEventsIterator(dumpDirectory, hiveConf), hiveConf, eventTo, metricCollector); /* * If the current incremental dump also includes bootstrap for some tables, then create iterator @@ -87,7 +96,8 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, FileSystem fs = incBootstrapDir.getFileSystem(hiveConf); if (fs.exists(incBootstrapDir)) { this.bootstrapIterator = new BootstrapEventsIterator( - new Path(incBootstrapDir, EximUtil.METADATA_PATH_NAME).toString(), dbNameToLoadIn, true, hiveConf); + new Path(incBootstrapDir, EximUtil.METADATA_PATH_NAME).toString(), dbNameToLoadIn, true, + hiveConf, metricCollector); this.constraintsIterator = new ConstraintEventsIterator(dumpDirectory, hiveConf); } else { this.bootstrapIterator = null; @@ -95,7 +105,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, } } else { this.bootstrapIterator = new BootstrapEventsIterator(new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME) - .toString(), dbNameToLoadIn, true, hiveConf); + .toString(), dbNameToLoadIn, true, hiveConf, metricCollector); this.constraintsIterator = new ConstraintEventsIterator( new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME).toString(), hiveConf); incrementalLoadTasksBuilder = null; @@ -158,4 +168,12 @@ public void setLastReplIDUpdated(boolean lastReplIDUpdated) { public String getSourceDbName() { return sourceDbName; } + + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + + public Long getDumpExecutionId() { + return dumpExecutionId; + } } 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java index 7ade7c07d7..240f5a7db6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hive.ql.exec.repl; +import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.api.StageType; import java.io.Serializable; @@ -34,7 +36,13 @@ @Override public int execute() { - work.replStateLog(); + try { + work.replStateLog(); + } catch (SemanticException e) { + LOG.error("Exception while logging metrics ", e); + setException(e); + return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); + } return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java index 37725d68c6..11d7b9861c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java @@ -19,8 +19,13 @@ package org.apache.hadoop.hive.ql.exec.repl; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.utils.StringUtils; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -43,6 +48,7 @@ private TableType tableType; private String functionName; 
private String lastReplId; + private final transient ReplicationMetricCollector metricCollector; private enum LOG_TYPE { TABLE, @@ -51,50 +57,62 @@ END } - public ReplStateLogWork(ReplLogger replLogger, String eventId, String eventType) { + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String eventId, String eventType) { this.logType = LOG_TYPE.EVENT; this.replLogger = replLogger; this.eventId = eventId; this.eventType = eventType; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, String tableName, TableType tableType) { + public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector, + String tableName, TableType tableType) { this.logType = LOG_TYPE.TABLE; this.replLogger = replLogger; this.tableName = tableName; this.tableType = tableType; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, String functionName) { + public ReplStateLogWork(ReplLogger replLogger, String functionName, ReplicationMetricCollector metricCollector) { this.logType = LOG_TYPE.FUNCTION; this.replLogger = replLogger; this.functionName = functionName; + this.metricCollector = metricCollector; } - public ReplStateLogWork(ReplLogger replLogger, Map dbProps) { + public ReplStateLogWork(ReplLogger replLogger, Map dbProps, + ReplicationMetricCollector metricCollector) { this.logType = LOG_TYPE.END; this.replLogger = replLogger; this.lastReplId = ReplicationSpec.getLastReplicatedStateFromParameters(dbProps); + this.metricCollector = metricCollector; } - public void replStateLog() { + public void replStateLog() throws SemanticException { switch (logType) { - case TABLE: { - replLogger.tableLog(tableName, tableType); - break; - } - case FUNCTION: { - replLogger.functionLog(functionName); - break; - } - case EVENT: { - replLogger.eventLog(eventId, eventType); - break; - } - case END: { - replLogger.endLog(lastReplId); - break; + case TABLE: + 
replLogger.tableLog(tableName, tableType); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.TABLES.name(), 1); + break; + case FUNCTION: + replLogger.functionLog(functionName); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.FUNCTIONS.name(), 1); + break; + case EVENT: + replLogger.eventLog(eventId, eventType); + metricCollector.reportStageProgress("REPL_LOAD", ReplUtils.MetricName.EVENTS.name(), 1); + break; + case END: + replLogger.endLog(lastReplId); + if (StringUtils.isEmpty(lastReplId) || lastReplId.equalsIgnoreCase("null")) { + metricCollector.reportStageEnd("REPL_LOAD", Status.SUCCESS); + } else { + metricCollector.reportStageEnd("REPL_LOAD", Status.SUCCESS, Long.parseLong(lastReplId)); } + metricCollector.reportEnd(Status.SUCCESS); + break; } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java index 5bbe20c8c6..0cb818a7f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java @@ -23,13 +23,17 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.BootstrapEvent; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.load.log.BootstrapLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import java.io.IOException; import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.HashMap; import java.util.function.Consumer; import 
java.util.stream.Collectors; @@ -76,9 +80,12 @@ private final HiveConf hiveConf; private final boolean needLogger; private ReplLogger replLogger; + private final transient ReplicationMetricCollector metricCollector; - public BootstrapEventsIterator(String dumpDirectory, String dbNameToLoadIn, boolean needLogger, HiveConf hiveConf) + public BootstrapEventsIterator(String dumpDirectory, String dbNameToLoadIn, boolean needLogger, HiveConf hiveConf, + ReplicationMetricCollector metricCollector) throws IOException { + this.metricCollector = metricCollector; Path path = new Path(dumpDirectory); FileSystem fileSystem = path.getFileSystem(hiveConf); if (!fileSystem.exists(path)) { @@ -123,6 +130,7 @@ public boolean hasNext() { if (needLogger) { initReplLogger(); } + initMetricCollector(); } else { return false; } @@ -161,17 +169,16 @@ public ReplLogger replLogger() { return replLogger; } + public ReplicationMetricCollector getMetricCollector() { + return metricCollector; + } + private void initReplLogger() { try { Path dbDumpPath = currentDatabaseIterator.dbLevelPath(); FileSystem fs = dbDumpPath.getFileSystem(hiveConf); - - long numTables = getSubDirs(fs, dbDumpPath).length; - long numFunctions = 0; - Path funcPath = new Path(dbDumpPath, ReplUtils.FUNCTIONS_ROOT_DIR_NAME); - if (fs.exists(funcPath)) { - numFunctions = getSubDirs(fs, funcPath).length; - } + long numTables = getNumTables(dbDumpPath, fs); + long numFunctions = getNumFunctions(dbDumpPath, fs); String dbName = StringUtils.isBlank(dbNameToLoadIn) ? 
dbDumpPath.getName() : dbNameToLoadIn; replLogger = new BootstrapLoadLogger(dbName, dumpDirectory, numTables, numFunctions); replLogger.startLog(); @@ -180,6 +187,33 @@ private void initReplLogger() { } } + private long getNumFunctions(Path dbDumpPath, FileSystem fs) throws IOException { + Path funcPath = new Path(dbDumpPath, ReplUtils.FUNCTIONS_ROOT_DIR_NAME); + if (fs.exists(funcPath)) { + return getSubDirs(fs, funcPath).length; + } + return 0; + } + + private long getNumTables(Path dbDumpPath, FileSystem fs) throws IOException { + return getSubDirs(fs, dbDumpPath).length; + } + + private void initMetricCollector() { + try { + Path dbDumpPath = currentDatabaseIterator.dbLevelPath(); + FileSystem fs = dbDumpPath.getFileSystem(hiveConf); + long numTables = getNumTables(dbDumpPath, fs); + long numFunctions = getNumFunctions(dbDumpPath, fs); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), numTables); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), numFunctions); + metricCollector.reportStageStart("REPL_LOAD", metricMap); + } catch (Exception e) { + throw new RuntimeException("Failed to collect Metrics ", e); + } + } + FileStatus[] getSubDirs(FileSystem fs, Path dirPath) throws IOException { return fs.listStatus(dirPath, new PathFilter() { @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java index 8815eeebe1..667ec7ff31 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.parse.repl.load.MetaData; import org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; +import 
org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,19 +56,21 @@ private final FunctionEvent event; private final String dbNameToLoadIn; private final TaskTracker tracker; + private final ReplicationMetricCollector metricCollector; public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, - String dbNameToLoadIn, TaskTracker existingTracker) { + String dbNameToLoadIn, TaskTracker existingTracker, ReplicationMetricCollector metricCollector) { this.context = context; this.replLogger = replLogger; this.event = event; this.dbNameToLoadIn = dbNameToLoadIn; this.tracker = new TaskTracker(existingTracker); + this.metricCollector = metricCollector; } private void createFunctionReplLogTask(List> functionTasks, String functionName) { - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, metricCollector); Task replLogTask = TaskFactory.get(replLogWork, context.hiveConf); DAGTraversal.traverse(functionTasks, new AddDependencyToLeaves(replLogTask)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index b36c4a531f..b78df44e84 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import 
org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -77,19 +78,22 @@ private final TableEvent event; private final TaskTracker tracker; private final AlterTableAddPartitionDesc lastReplicatedPartition; + private final ReplicationMetricCollector metricCollector; private final ImportTableDesc tableDesc; private Table table; public LoadPartitions(Context context, ReplLogger replLogger, TaskTracker tableTracker, TableEvent event, String dbNameToLoadIn, - TableContext tableContext) throws HiveException { - this(context, replLogger, tableContext, tableTracker, event, dbNameToLoadIn, null); + TableContext tableContext, ReplicationMetricCollector metricCollector) throws HiveException { + this(context, replLogger, tableContext, tableTracker, event, dbNameToLoadIn, null, + metricCollector); } public LoadPartitions(Context context, ReplLogger replLogger, TableContext tableContext, TaskTracker limiter, TableEvent event, String dbNameToLoadIn, - AlterTableAddPartitionDesc lastReplicatedPartition) throws HiveException { + AlterTableAddPartitionDesc lastReplicatedPartition, + ReplicationMetricCollector metricCollector) throws HiveException { this.tracker = new TaskTracker(limiter); this.event = event; this.context = context; @@ -99,6 +103,7 @@ public LoadPartitions(Context context, ReplLogger replLogger, TableContext table this.tableDesc = event.tableDesc(dbNameToLoadIn); this.table = ImportSemanticAnalyzer.tableIfExists(tableDesc, context.hiveDb); + this.metricCollector = metricCollector; } public TaskTracker tasks() throws Exception { @@ -118,7 +123,7 @@ public TaskTracker tasks() throws Exception { if (!forNewTable().hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); tracker.addDependentTask(replLogTask); } return tracker; @@ -132,7 +137,7 
@@ public TaskTracker tasks() throws Exception { if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); tracker.addDependentTask(replLogTask); } return tracker; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 6cea22c01f..9e236fd697 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.ImportTableDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; @@ -73,14 +74,16 @@ private final TableContext tableContext; private final TaskTracker tracker; private final TableEvent event; + private final ReplicationMetricCollector metricCollector; public LoadTable(TableEvent event, Context context, ReplLogger replLogger, - TableContext tableContext, TaskTracker limiter) { + TableContext tableContext, TaskTracker limiter, ReplicationMetricCollector metricCollector) { this.event = event; this.context = context; this.replLogger = replLogger; this.tableContext = tableContext; this.tracker = new TaskTracker(limiter); + this.metricCollector = metricCollector; } public TaskTracker tasks(boolean isBootstrapDuringInc) throws Exception { @@ -151,7 +154,7 @@ public TaskTracker 
tasks(boolean isBootstrapDuringInc) throws Exception { ); if (!isPartitioned(tableDesc)) { Task replLogTask - = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); + = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector); ckptTask.addDependentTask(replLogTask); } tracker.addDependentTask(ckptTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 7e844d3164..52b6547e95 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.exec.repl.util.AddDependencyToLeaves; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; import org.apache.hadoop.hive.ql.exec.util.DAGTraversal; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; import org.apache.hadoop.hive.ql.parse.repl.load.log.IncrementalLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; import org.slf4j.Logger; @@ -73,9 +75,12 @@ private final ReplLogger replLogger; private static long numIteration; private final Long eventTo; + private final ReplicationMetricCollector metricCollector; public IncrementalLoadTasksBuilder(String dbName, String loadPath, - IncrementalLoadEventsIterator iterator, HiveConf conf, Long eventTo) { + 
IncrementalLoadEventsIterator iterator, HiveConf conf, + Long eventTo, + ReplicationMetricCollector metricCollector) throws SemanticException { this.dbName = dbName; this.iterator = iterator; inputs = new HashSet<>(); @@ -85,7 +90,11 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, replLogger = new IncrementalLoadLogger(dbName, loadPath, iterator.getNumEvents()); replLogger.startLog(); this.eventTo = eventTo; - numIteration = 0; + setNumIteration(0); + this.metricCollector = metricCollector; + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) iterator.getNumEvents()); + this.metricCollector.reportStageStart("REPL_LOAD", metricMap); } public Task build(Context context, Hive hive, Logger log, @@ -96,7 +105,6 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, this.log = log; numIteration++; this.log.debug("Iteration num " + numIteration); - while (iterator.hasNext() && tracker.canAddMoreTasks()) { FileStatus dir = iterator.next(); String location = dir.getPath().toUri().toString(); @@ -135,7 +143,7 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, List> evTasks = analyzeEventLoad(mhContext); if ((evTasks != null) && (!evTasks.isEmpty())) { - ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, + ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, metricCollector, dir.getPath().getName(), eventDmd.getDumpType().toString()); Task barrierTask = TaskFactory.get(replStateLogWork, conf); @@ -157,7 +165,7 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, Map dbProps = new HashMap<>(); dbProps.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), String.valueOf(lastReplayedEvent)); - ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps); + ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, metricCollector); Task barrierTask = TaskFactory.get(replStateLogWork, conf); 
taskChainTail.addDependentTask(barrierTask); this.log.debug("Added {}:{} as a precursor of barrier task {}:{}", @@ -364,6 +372,10 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return tasks; } + private static void setNumIteration(int count) { + numIteration = count; + } + public Long eventTo() { return eventTo; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index c0aadb5aa2..ecf51a9f32 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; @@ -134,6 +135,13 @@ LOAD_NEW, LOAD_SKIP, LOAD_REPLACE } + /** + * Replication Metrics. + */ + public enum MetricName { + TABLES, FUNCTIONS, EVENTS, POLICIES, ENTITIES + } + public static Map> genPartSpecs( Table table, List> partitions) throws SemanticException { Map> partSpecs = new HashMap<>(); @@ -167,10 +175,12 @@ return partSpecs; } - public static Task getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf) + public static Task getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf, + ReplicationMetricCollector metricCollector) throws SemanticException { TableType tableType = tableDesc.isExternal() ? 
TableType.EXTERNAL_TABLE : tableDesc.tableType(); - ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, tableDesc.getTableName(), tableType); + ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, metricCollector, + tableDesc.getTableName(), tableType); return TaskFactory.get(replLogWork, conf); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index 7959df2b2f..ed358f379a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.plan.PlanUtils; import java.io.IOException; @@ -398,7 +401,9 @@ private void analyzeReplLoad(ASTNode ast) throws SemanticException { ReplLoadWork replLoadWork = new ReplLoadWork(conf, loadPath.toString(), sourceDbNameOrPattern, replScope.getDbName(), dmd.getReplScope(), - queryState.getLineageState(), evDump, dmd.getEventTo()); + queryState.getLineageState(), evDump, dmd.getEventTo(), dmd.getDumpExecutionId(), + initMetricCollection(!evDump, loadPath.toString(), replScope.getDbName(), + dmd.getDumpExecutionId())); rootTasks.add(TaskFactory.get(replLoadWork, conf)); } else { LOG.warn("Previous Dump Already Loaded"); @@ -409,6 +414,17 @@ private void analyzeReplLoad(ASTNode ast) throws SemanticException { } } + private ReplicationMetricCollector initMetricCollection(boolean isBootstrap, String dumpDirectory, + 
String dbNameToLoadIn, long dumpExecutionId) { + ReplicationMetricCollector collector; + if (isBootstrap) { + collector = new BootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dumpExecutionId, conf); + } else { + collector = new IncrementalLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dumpExecutionId, conf); + } + return collector; + } + private Path getCurrentLoadPath() throws IOException, SemanticException { Path loadPathBase = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR), Base64.getEncoder().encodeToString(sourceDbNameOrPattern.toLowerCase() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java new file mode 100644 index 0000000000..48a37b6171 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/BootstrapDumpMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse.repl.dump.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * BootstrapDumpMetricCollector. + * Bootstrap Dump Metric Collector + */ +public class BootstrapDumpMetricCollector extends ReplicationMetricCollector { + public BootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf) { + super(dbName, Metadata.ReplicationType.BOOTSTRAP, stagingDir, 0, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java new file mode 100644 index 0000000000..2c0eb473f3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/IncrementalDumpMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse.repl.dump.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * IncrementalDumpMetricCollector. + * Incremental Dump Metric Collector + */ +public class IncrementalDumpMetricCollector extends ReplicationMetricCollector { + public IncrementalDumpMetricCollector(String dbName, String stagingDir, HiveConf conf) { + super(dbName, Metadata.ReplicationType.INCREMENTAL, stagingDir, 0, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java index e538c79f34..dc40e1df9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java @@ -51,6 +51,7 @@ private boolean initialized = false; private final Path dumpFile; private final HiveConf hiveConf; + private Long dumpExecutionId; public DumpMetaData(Path dumpRoot, HiveConf hiveConf) { this.hiveConf = hiveConf; @@ -60,15 +61,16 @@ public DumpMetaData(Path dumpRoot, HiveConf hiveConf) { public DumpMetaData(Path dumpRoot, DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot, HiveConf hiveConf) { this(dumpRoot, hiveConf); - setDump(lvl, eventFrom, eventTo, cmRoot); + setDump(lvl, eventFrom, eventTo, cmRoot, 0L); } - public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot) { + public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot, Long dumpExecutionId) { this.dumpType = lvl; this.eventFrom = eventFrom; this.eventTo = eventTo; this.cmRoot = cmRoot; this.initialized = true; + this.dumpExecutionId = dumpExecutionId; } public void setPayload(String payload) { @@ -115,11 +117,11 @@ private void loadDumpFromFile() throws SemanticException { br = new BufferedReader(new 
InputStreamReader(fs.open(dumpFile))); String line; if ((line = br.readLine()) != null) { - String[] lineContents = line.split("\t", 5); + String[] lineContents = line.split("\t", 6); setDump(DumpType.valueOf(lineContents[0]), Long.valueOf(lineContents[1]), Long.valueOf(lineContents[2]), - new Path(lineContents[3])); - setPayload(lineContents[4].equals(Utilities.nullStringOutput) ? null : lineContents[4]); + new Path(lineContents[3]), Long.valueOf(lineContents[4])); + setPayload(lineContents[5].equals(Utilities.nullStringOutput) ? null : lineContents[5]); } else { throw new IOException( "Unable to read valid values from dumpFile:" + dumpFile.toUri().toString()); @@ -158,6 +160,11 @@ public Long getEventTo() throws SemanticException { return eventTo; } + public Long getDumpExecutionId() throws SemanticException { + initializeIfNot(); + return dumpExecutionId; + } + public ReplScope getReplScope() throws SemanticException { initializeIfNot(); return replScope; @@ -207,6 +214,7 @@ public void write(boolean replace) throws SemanticException { eventFrom.toString(), eventTo.toString(), cmRoot.toString(), + dumpExecutionId.toString(), payload) ); if (replScope != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java new file mode 100644 index 0000000000..2a1b98e649 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/BootstrapLoadMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * BootstrapLoadMetricCollector. + * Bootstrap Load Metric Collector + */ +public class BootstrapLoadMetricCollector extends ReplicationMetricCollector { + public BootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) { + super(dbName, Metadata.ReplicationType.BOOTSTRAP, stagingDir, dumpExecutionId, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java new file mode 100644 index 0000000000..57c9720acb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/IncrementalLoadMetricCollector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.metric; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; + +/** + * IncrementalLoadMetricCollector. + * Incremental Load Metric Collector + */ +public class IncrementalLoadMetricCollector extends ReplicationMetricCollector { + public IncrementalLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) { + super(dbName, Metadata.ReplicationType.INCREMENTAL, stagingDir, dumpExecutionId, conf); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java new file mode 100644 index 0000000000..9dbaf51581 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/MetricCollector.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.hive.ql.parse.repl.metric;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MetricCollector.
 * Process-wide, bounded in-memory store of replication metrics, drained
 * periodically by {@link MetricSink}. Singleton with double-checked locking;
 * all map access is guarded by synchronized methods.
 */
public class MetricCollector {
  private static final Logger LOG = LoggerFactory.getLogger(MetricCollector.class);

  // Keyed by scheduled execution id (see addMetric). FIX: generic parameters
  // were lost as raw types; Map<Long, ReplicationMetric> is the only typing
  // consistent with put(replicationMetric.getScheduledExecutionId(), ...).
  private final Map<Long, ReplicationMetric> metricMap = new HashMap<>();
  private long maxSize = 0;
  private boolean isInited = false;
  private static volatile MetricCollector instance;

  private MetricCollector() {
  }

  /** Returns the lazily-created singleton (double-checked locking on the volatile field). */
  public static MetricCollector getInstance() {
    if (instance == null) {
      synchronized (MetricCollector.class) {
        if (instance == null) {
          instance = new MetricCollector();
        }
      }
    }
    return instance;
  }

  /**
   * One-shot initialization: the cache capacity is fixed by the first caller's
   * configuration; later calls are no-ops.
   *
   * @param conf configuration supplying the cache capacity
   * @return the singleton instance, for call chaining
   */
  public synchronized MetricCollector init(HiveConf conf) {
    if (!isInited) {
      maxSize = getMaxSize(conf);
      isInited = true;
    }
    return instance;
  }

  /** Reads the configured cache capacity; package-visible for test overrides. */
  long getMaxSize(HiveConf conf) {
    return MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.REPL_METRICS_CACHE_MAXSIZE);
  }

  /**
   * Adds or replaces the metric keyed by its scheduled execution id.
   *
   * @throws SemanticException when the cache is at capacity.
   *     NOTE(review): the 80%-full warning below says metrics "will start
   *     dropping once full", but this method actually throws instead of
   *     dropping — confirm which contract is intended.
   */
  public synchronized void addMetric(ReplicationMetric replicationMetric) throws SemanticException {
    if (metricMap.size() >= maxSize) {
      throw new SemanticException("Metrics are not getting collected. ");
    } else {
      if (metricMap.size() > 0.8 * maxSize) { //soft limit
        LOG.warn("Metrics cache is more than 80 % full. Will start dropping metrics once full. ");
      }
      metricMap.put(replicationMetric.getScheduledExecutionId(), replicationMetric);
    }
  }

  /**
   * Snapshots all metrics, then evicts every entry whose progress is no longer
   * IN_PROGRESS (terminal metrics are reported exactly once).
   * NOTE(review): assumes getProgress() is never null — a metric always carries
   * a Progress from construction; verify against ReplicationMetric.
   */
  public synchronized List<ReplicationMetric> getMetrics() {
    List<ReplicationMetric> metricList = new ArrayList<>(metricMap.values());
    metricMap.entrySet().removeIf(e -> !Status.IN_PROGRESS.equals(e.getValue().getProgress().getStatus()));
    return metricList;
  }

  //For testing: resets state and discards the singleton so the next
  //getInstance()/init() starts fresh.
  synchronized void deinit() {
    if (isInited) {
      isInited = false;
      metricMap.clear();
      instance = null;
    }
  }
}
package org.apache.hadoop.hive.ql.parse.repl.metric;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.Retry;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * MetricSink.
 * Scheduled thread to poll from Metric Collector and persist to the metastore DB.
 * Singleton; init() starts the background writer, tearDown() stops it.
 */
public class MetricSink {
  private static final Logger LOG = LoggerFactory.getLogger(MetricSink.class);
  private ScheduledExecutorService executorService;
  private static volatile MetricSink instance;
  private boolean isInitialised = false;
  private HiveConf conf;

  private MetricSink() {
    this.executorService = Executors.newSingleThreadScheduledExecutor();
  }

  /** Returns the lazily-created singleton (double-checked locking on the volatile field). */
  public static MetricSink getInstance() {
    if (instance == null) {
      synchronized (MetricSink.class) {
        if (instance == null) {
          instance = new MetricSink();
        }
      }
    }
    return instance;
  }

  /**
   * Starts the periodic writer at the configured frequency. Idempotent: only
   * the first call has any effect.
   * FIX: the original used schedule(), which runs the writer exactly once —
   * contradicting this class's documented periodic-polling contract. Use
   * scheduleAtFixedRate so the cache is drained every cycle.
   */
  public synchronized void init(HiveConf conf) {
    if (!isInitialised) {
      this.conf = conf;
      long frequency = getFrequencyInSecs();
      this.executorService.scheduleAtFixedRate(new MetricSinkWriter(conf), frequency, frequency, TimeUnit.SECONDS);
      isInitialised = true;
    }
  }

  /** Polling frequency in seconds; the metastore conf value is in minutes. */
  long getFrequencyInSecs() {
    //Metastore conf is in minutes
    return MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.REPL_METRICS_UPDATE_FREQUENCY) * 60;
  }

  /**
   * Stops the background writer.
   * FIX: after shutdown(), isShutdown() is always true, so the original
   * "if (!isShutdown()) shutdownNow()" fallback was dead code. Await brief
   * termination and escalate to shutdownNow() if the writer is still running.
   */
  public synchronized void tearDown() {
    if (isInitialised) {
      this.executorService.shutdown();
      try {
        if (!this.executorService.awaitTermination(10, TimeUnit.SECONDS)) {
          this.executorService.shutdownNow();
        }
      } catch (InterruptedException e) {
        this.executorService.shutdownNow();
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
      }
      isInitialised = false;
    }
  }

  /** Drains the in-memory collector and persists the metrics via the metastore client. */
  static class MetricSinkWriter implements Runnable {
    private MetricCollector collector;
    private HiveConf conf;

    // writer instance

    MetricSinkWriter(HiveConf conf) {
      this.collector = MetricCollector.getInstance();
      this.conf = conf;
    }

    @Override
    public void run() {
      // get metrics (snapshot; terminal metrics are evicted by the collector)
      List<ReplicationMetric> metrics = collector.getMetrics();
      // write metrics and retry if fails
      Retry<Void> retriable = new Retry<Void>(Exception.class) {
        @Override
        public Void execute() throws Exception {
          if (metrics.size() > 0) {
            ReplicationMetricList metricList = new ReplicationMetricList();
            List<ReplicationMetrics> replicationMetricsList = new ArrayList<>(metrics.size());
            // FIX: ObjectMapper is thread-safe and expensive; create once, not per metric.
            ObjectMapper mapper = new ObjectMapper();
            for (ReplicationMetric metric : metrics) {
              ReplicationMetrics persistentMetric = new ReplicationMetrics();
              persistentMetric.setDumpExecutionId(metric.getDumpExecutionId());
              persistentMetric.setScheduledExecutionId(metric.getScheduledExecutionId());
              persistentMetric.setPolicy(metric.getPolicy());
              persistentMetric.setProgress(mapper.writeValueAsString(metric.getProgress()));
              persistentMetric.setMetadata(mapper.writeValueAsString(metric.getMetadata()));
              replicationMetricsList.add(persistentMetric);
            }
            metricList.setReplicationMetricList(replicationMetricsList);
            //write
            Hive.get(conf).getMSC().addReplicationMetrics(metricList);
          }
          return null;
        }
      };
      try {
        retriable.run();
      } catch (Exception e) {
        // FIX: the original threw RuntimeException (dropping the cause); an
        // exception escaping a scheduled task also cancels every future run.
        // Log with cause and let the next cycle retry.
        LOG.error("Metrics are not getting persisted", e);
      }
    }
  }
}
package org.apache.hadoop.hive.ql.parse.repl.metric;

import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.utils.StringUtils;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric;

import java.util.Map;

/**
 * Abstract class for Replication Metric Collection.
 * Collection is active only when the conf carries a scheduled-query policy name
 * and a positive execution id; otherwise every report* method is a no-op.
 */
public abstract class ReplicationMetricCollector {
  private ReplicationMetric replicationMetric;
  private MetricCollector metricCollector;
  private boolean isEnabled;

  /**
   * @param dbName          database the replication operates on
   * @param replicationType BOOTSTRAP or INCREMENTAL
   * @param stagingDir      dump staging directory recorded in the metadata
   * @param dumpExecutionId id of the dump execution
   * @param conf            supplies policy name / scheduled execution id; collection
   *                        stays disabled when either is absent
   */
  public ReplicationMetricCollector(String dbName, Metadata.ReplicationType replicationType,
                                    String stagingDir, long dumpExecutionId, HiveConf conf) {
    String policy = conf.get(Constants.SCHEDULED_QUERY_SCHEDULENAME);
    long executionId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
    if (!StringUtils.isEmpty(policy) && executionId > 0) {
      isEnabled = true;
      metricCollector = MetricCollector.getInstance().init(conf);
      MetricSink.getInstance().init(conf);
      Metadata metadata = new Metadata(dbName, replicationType, stagingDir);
      replicationMetric = new ReplicationMetric(executionId, policy, dumpExecutionId, metadata);
    }
  }

  /**
   * Registers a new IN_PROGRESS stage with its expected metric totals.
   * FIX: parameter was a raw Map (generics lost); Map&lt;String, Long&gt; matches
   * the Metric(name, totalCount) usage below and is erasure-compatible.
   *
   * @param stageName name of the stage being started
   * @param metricMap metric name -&gt; expected total count
   */
  public void reportStageStart(String stageName, Map<String, Long> metricMap) throws SemanticException {
    if (isEnabled) {
      Progress progress = replicationMetric.getProgress();
      Stage stage = new Stage(stageName, Status.IN_PROGRESS, System.currentTimeMillis());
      for (Map.Entry<String, Long> metric : metricMap.entrySet()) {
        stage.addMetric(new Metric(metric.getKey(), metric.getValue()));
      }
      progress.addStage(stage);
      replicationMetric.setProgress(progress);
      metricCollector.addMetric(replicationMetric);
    }
  }


  /**
   * Marks a stage finished with the given status and records the last
   * replicated event id in the metadata.
   * NOTE(review): assumes reportStageStart was called for stageName first —
   * getStageByName returns null otherwise and this would NPE.
   */
  public void reportStageEnd(String stageName, Status status, long lastReplId) throws SemanticException {
    if (isEnabled) {
      Progress progress = replicationMetric.getProgress();
      Stage stage = progress.getStageByName(stageName);
      stage.setStatus(status);
      stage.setEndTime(System.currentTimeMillis());
      progress.updateStage(stage);
      replicationMetric.setProgress(progress);
      Metadata metadata = replicationMetric.getMetadata();
      metadata.setLastReplId(lastReplId);
      replicationMetric.setMetadata(metadata);
      metricCollector.addMetric(replicationMetric);
    }
  }

  /** Marks a stage finished with the given status (no repl-id update). */
  public void reportStageEnd(String stageName, Status status) throws SemanticException {
    if (isEnabled) {
      Progress progress = replicationMetric.getProgress();
      Stage stage = progress.getStageByName(stageName);
      stage.setStatus(status);
      stage.setEndTime(System.currentTimeMillis());
      progress.updateStage(stage);
      replicationMetric.setProgress(progress);
      metricCollector.addMetric(replicationMetric);
    }
  }

  /**
   * Increments a stage metric's current count by {@code count}; the total is
   * bumped to match if the current count overtakes it.
   * NOTE(review): assumes the metric was registered via reportStageStart —
   * getMetricByName returns null otherwise.
   */
  public void reportStageProgress(String stageName, String metricName, long count) throws SemanticException {
    if (isEnabled) {
      Progress progress = replicationMetric.getProgress();
      Stage stage = progress.getStageByName(stageName);
      Metric metric = stage.getMetricByName(metricName);
      metric.setCurrentCount(metric.getCurrentCount() + count);
      if (metric.getCurrentCount() > metric.getTotalCount()) {
        metric.setTotalCount(metric.getCurrentCount());
      }
      stage.addMetric(metric);
      progress.updateStage(stage);
      replicationMetric.setProgress(progress);
      metricCollector.addMetric(replicationMetric);
    }
  }

  /** Records the terminal status of the whole replication run. */
  public void reportEnd(Status status) throws SemanticException {
    if (isEnabled) {
      Progress progress = replicationMetric.getProgress();
      progress.setStatus(status);
      replicationMetric.setProgress(progress);
      metricCollector.addMetric(replicationMetric);
    }
  }
}
/**
 * Metadata attached to a replication metric: which database, which kind of
 * replication, where the dump is staged, and the last replicated event id.
 * Plain mutable bean; the no-arg constructor exists for JSON deserialization.
 */
public class Metadata {
  /**
   * Type of replication.
   */
  public enum ReplicationType {
    BOOTSTRAP,
    INCREMENTAL
  }

  private String dbName;
  private ReplicationType replicationType;
  private String stagingDir;
  private long lastReplId;

  public Metadata() {

  }

  public Metadata(String dbName, ReplicationType replicationType, String stagingDir) {
    this.dbName = dbName;
    this.replicationType = replicationType;
    this.stagingDir = stagingDir;
  }

  public String getDbName() {
    return dbName;
  }

  public ReplicationType getReplicationType() {
    return replicationType;
  }

  public String getStagingDir() {
    return stagingDir;
  }

  /** Last replicated event id; 0 until set by a stage-end report. */
  public long getLastReplId() {
    return lastReplId;
  }

  public void setLastReplId(long lastReplId) {
    this.lastReplId = lastReplId;
  }
}
/**
 * A single progress counter inside a stage: a name, the count achieved so far,
 * and the expected total. Plain mutable bean; the no-arg constructor exists
 * for JSON deserialization.
 */
public class Metric {
  private String name;
  private long currentCount;
  private long totalCount;

  public Metric() {

  }

  /** Starts at currentCount == 0 with the given expected total. */
  public Metric(String name, long totalCount) {
    this.name = name;
    this.totalCount = totalCount;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public long getCurrentCount() {
    return currentCount;
  }

  public void setCurrentCount(long currentCount) {
    this.currentCount = currentCount;
  }

  public long getTotalCount() {
    return totalCount;
  }

  public void setTotalCount(long totalCount) {
    this.totalCount = totalCount;
  }
}
package org.apache.hadoop.hive.ql.parse.repl.metric.event;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;

import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Overall progress of a replication run: a terminal status plus the set of
 * stages, keyed by stage name. Stage map uses ConcurrentHashMap since the
 * collector and the sink writer may touch it from different threads.
 */
public class Progress {

  private Status status;

  // NOTE(review): annotation arguments (if any) were lost in extraction —
  // verify the intended Jackson configuration against the original source.
  @JsonProperty()
  @JsonDeserialize()
  private Map<String, Stage> stages = new ConcurrentHashMap<>();

  public Progress() {

  }

  public Status getStatus() {
    return status;
  }

  public void setStatus(Status status) {
    this.status = status;
  }

  /** Registers a stage; an existing stage with the same name is kept. */
  public void addStage(Stage stage) {
    stages.putIfAbsent(stage.getName(), stage);
  }

  /** Replaces the stage with the same name unconditionally. */
  public void updateStage(Stage stage) {
    stages.put(stage.getName(), stage);
  }

  /** @return the stage with that name, or null if never added. */
  public Stage getStageByName(String stageName) {
    return stages.get(stageName);
  }

  /** @return a snapshot list of all stages (order unspecified). */
  public List<Stage> getStages() {
    return new ArrayList<>(stages.values());
  }

}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.metric.event; + +/** + * Class for defining the replication metrics. + */ +public class ReplicationMetric { + private long scheduledExecutionId; + private String policy; + private long dumpExecutionId; + private Metadata metadata; + private Progress progress; + + public ReplicationMetric(long scheduledExecutionId, String policy, long dumpExecutionId, Metadata metadata){ + this.scheduledExecutionId = scheduledExecutionId; + this.policy = policy; + this.dumpExecutionId = dumpExecutionId; + this.metadata = metadata; + this.progress = new Progress(); + } + + public long getScheduledExecutionId() { + return scheduledExecutionId; + } + + + public String getPolicy() { + return policy; + } + + public long getDumpExecutionId() { + return dumpExecutionId; + } + + public Progress getProgress() { + return progress; + } + + public void setMetadata(Metadata metadata) { + this.metadata = metadata; + } + + public Metadata getMetadata() { + return metadata; + } + + public void setProgress(Progress progress) { + this.progress = progress; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java new file mode 100644 index 0000000000..490613a44b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
package org.apache.hadoop.hive.ql.parse.repl.metric.event;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * One stage of a replication run: a name, its status, start/end timestamps
 * (epoch millis), and the metrics it tracks, keyed by metric name.
 * Plain mutable bean; the no-arg constructor exists for JSON deserialization.
 */
public class Stage {
  private String name;
  private Status status;
  private long startTime;
  private long endTime;
  private Map<String, Metric> metrics = new HashMap<>();

  public Stage() {

  }

  public Stage(String name, Status status, long startTime) {
    this.name = name;
    this.status = status;
    this.startTime = startTime;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public Status getStatus() {
    return status;
  }

  public void setStatus(Status status) {
    this.status = status;
  }

  public long getStartTime() {
    return startTime;
  }

  public void setStartTime(long startTime) {
    this.startTime = startTime;
  }

  public long getEndTime() {
    return endTime;
  }

  public void setEndTime(long endTime) {
    this.endTime = endTime;
  }

  /** Adds or replaces the metric keyed by its name. */
  public void addMetric(Metric metric) {
    this.metrics.put(metric.getName(), metric);
  }

  /** @return the metric with that name, or null if never added. */
  public Metric getMetricByName(String name) {
    return this.metrics.get(name);
  }

  /** @return a snapshot list of all metrics (order unspecified). */
  public List<Metric> getMetrics() {
    return new ArrayList<>(metrics.values());
  }
}
/**
 * Terminal and in-flight states of a replication run or stage.
 */
public enum Status {
  SUCCESS,
  FAILED,
  IN_PROGRESS
}
Mockito.when(conf.get(HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname)).thenReturn("http://localhost:21000/atlas"); + Mockito.when(conf.get(HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB.varname)).thenReturn("tgtDb"); + Mockito.when(conf.get(HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname)).thenReturn("srcCluster"); + Mockito.when(conf.get(HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname)).thenReturn("tgtCluster"); + Mockito.when(conf.get(ReplUtils.DEFAULT_FS_CONFIG)).thenReturn("hdfs:tgtFsUri:8020"); + Mockito.when(work.getStagingDir()).thenReturn(new Path("hdfs://tmp:8020/staging")); + Mockito.when(work.getSrcDB()).thenReturn("srcDB"); + Mockito.when(work.isBootstrap()).thenReturn(true); + atlasDumpTask = new AtlasDumpTask(atlasRestClient, conf, work); + AtlasDumpTask atlasDumpTaskSpy = Mockito.spy(atlasDumpTask); Mockito.when(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST_REPL)).thenReturn(true); Logger logger = Mockito.mock(Logger.class); Whitebox.setInternalState(ReplState.class, logger); - Mockito.when(atlasDumpTask.execute()).thenCallRealMethod(); - int status = atlasDumpTask.execute(); + Mockito.doReturn(0L).when(atlasDumpTaskSpy) + .dumpAtlasMetaData(any(AtlasRequestBuilder.class), any(AtlasReplInfo.class)); + Mockito.doNothing().when(atlasDumpTaskSpy).createDumpMetadata(any(AtlasReplInfo.class), any(Long.class)); + int status = atlasDumpTaskSpy.execute(); Assert.assertEquals(0, status); ArgumentCaptor replStateCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor eventCaptor = ArgumentCaptor.forClass(Object.class); ArgumentCaptor eventDetailsCaptor = ArgumentCaptor.forClass(Object.class); Mockito.verify(logger, - Mockito.times(2)).info(replStateCaptor.capture(), - eventCaptor.capture(), eventDetailsCaptor.capture()); + Mockito.times(2)).info(replStateCaptor.capture(), + eventCaptor.capture(), eventDetailsCaptor.capture()); Assert.assertEquals("REPL::{}: {}", replStateCaptor.getAllValues().get(0)); Assert.assertEquals("ATLAS_DUMP_START", 
eventCaptor.getAllValues().get(0)); Assert.assertEquals("ATLAS_DUMP_END", eventCaptor.getAllValues().get(1)); Assert.assertTrue(eventDetailsCaptor.getAllValues().get(1).toString(), eventDetailsCaptor.getAllValues().get(0) - .toString().contains("{\"dbName\":\"srcDB\",\"dumpStartTime")); + .toString().contains("{\"dbName\":\"srcDB\",\"dumpStartTime")); Assert.assertTrue(eventDetailsCaptor - .getAllValues().get(1).toString().contains("{\"dbName\":\"srcDB\",\"dumpEndTime\"")); + .getAllValues().get(1).toString().contains("{\"dbName\":\"srcDB\",\"dumpEndTime\"")); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestAtlasLoadTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestAtlasLoadTask.java index bb5fe0b352..826935b187 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestAtlasLoadTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestAtlasLoadTask.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.repl.atlas.AtlasReplInfo; import org.apache.hadoop.hive.ql.parse.repl.ReplState; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -36,24 +37,32 @@ */ @RunWith(MockitoJUnitRunner.class) public class TestAtlasLoadTask { - @Mock + private AtlasLoadTask atlasLoadTask; @Mock private HiveConf conf; + @Mock + private AtlasLoadWork work; + + @Mock + private ReplicationMetricCollector metricCollector; + @Test public void testAtlasLoadMetrics() throws Exception { + Mockito.when(work.getMetricCollector()).thenReturn(metricCollector); + atlasLoadTask = new AtlasLoadTask(conf, work); + AtlasLoadTask atlasLoadTaskSpy = Mockito.spy(atlasLoadTask); + Mockito.when(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST_REPL)).thenReturn(true); + Logger logger = Mockito.mock(Logger.class); + Whitebox.setInternalState(ReplState.class, logger); AtlasReplInfo atlasReplInfo = new 
AtlasReplInfo("http://localhost:21000/atlas", "srcDB", - "tgtDB", "srcCluster", "tgtCluster", new Path("hdfs://tmp"), conf); + "tgtDB", "srcCluster", "tgtCluster", new Path("hdfs://tmp"), conf); atlasReplInfo.setSrcFsUri("hdfs://srcFsUri:8020"); atlasReplInfo.setTgtFsUri("hdfs:tgtFsUri:8020"); - Mockito.when(atlasLoadTask.createAtlasReplInfo()).thenReturn(atlasReplInfo); - Mockito.when(atlasLoadTask.importAtlasMetadata(atlasReplInfo)).thenReturn(1); - Logger logger = Mockito.mock(Logger.class); - Whitebox.setInternalState(ReplState.class, logger); - Mockito.when(atlasLoadTask.execute()).thenCallRealMethod(); - int status = atlasLoadTask.execute(); + Mockito.doReturn(atlasReplInfo).when(atlasLoadTaskSpy).createAtlasReplInfo(); + int status = atlasLoadTaskSpy.execute(); Assert.assertEquals(0, status); ArgumentCaptor replStateCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor eventCaptor = ArgumentCaptor.forClass(Object.class); @@ -68,6 +77,6 @@ public void testAtlasLoadMetrics() throws Exception { .toString().contains("{\"sourceDbName\":\"srcDB\",\"targetDbName\":\"tgtDB\",\"loadStartTime\":")); Assert.assertTrue(eventDetailsCaptor .getAllValues().get(1).toString().contains("{\"sourceDbName\":\"srcDB\",\"targetDbName\"" - + ":\"tgtDB\",\"numOfEntities\":1,\"loadEndTime\"")); + + ":\"tgtDB\",\"numOfEntities\":0,\"loadEndTime\"")); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java index 9a20564de9..12e074107c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.repl.ReplState; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import 
org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -63,11 +64,15 @@ @Mock private RangerDumpWork work; + @Mock + private ReplicationMetricCollector metricCollector; + @Before public void setup() throws Exception { task = new RangerDumpTask(mockClient, conf, work); Mockito.when(mockClient.removeMultiResourcePolicies(Mockito.anyList())).thenCallRealMethod(); Mockito.when(mockClient.checkConnection(Mockito.anyString())).thenReturn(true); + Mockito.when(work.getMetricCollector()).thenReturn(metricCollector); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java index af41e3d773..f3397702c8 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerLoadTask.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy; import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerRestClientImpl; import org.apache.hadoop.hive.ql.parse.repl.ReplState; +import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -61,6 +62,9 @@ @Mock private RangerLoadWork work; + @Mock + private ReplicationMetricCollector metricCollector; + @Before public void setup() throws Exception { task = new RangerLoadTask(mockClient, conf, work); @@ -69,6 +73,7 @@ public void setup() throws Exception { Mockito.when(mockClient.addDenyPolicies(Mockito.anyList(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString())).thenCallRealMethod(); Mockito.when(mockClient.checkConnection(Mockito.anyString())).thenReturn(true); + Mockito.when(work.getMetricCollector()).thenReturn(metricCollector); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java 
b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java new file mode 100644 index 0000000000..95de5a849e --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java @@ -0,0 +1,306 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse.repl.metric; + +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; +import org.junit.Assert; +import org.junit.Before; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Map; +import java.util.HashMap; +import java.util.List; +import java.util.Arrays; + +/** + * Unit Test class for In Memory Replication Metric Collection. 
+ */ +@RunWith(MockitoJUnitRunner.class) +public class TestReplicationMetricCollector { + + HiveConf conf; + + @Before + public void setup() throws Exception { + conf = new HiveConf(); + conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl"); + conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1"); + MetricCollector.getInstance().init(conf); + } + + @After + public void finalize() { + MetricCollector.getInstance().deinit(); + } + + @Test + public void testFailureCacheHardLimit() throws Exception { + MetricCollector.getInstance().deinit(); + conf = new HiveConf(); + MetricCollector collector = MetricCollector.getInstance(); + MetricCollector metricCollectorSpy = Mockito.spy(collector); + Mockito.doReturn(1L).when(metricCollectorSpy).getMaxSize(Mockito.any()); + metricCollectorSpy.init(conf); + metricCollectorSpy.addMetric(new ReplicationMetric(1, "repl", + 0, null)); + try { + metricCollectorSpy.addMetric(new ReplicationMetric(2, "repl", + 0, null)); + Assert.fail(); + } catch (SemanticException e) { + Assert.assertEquals("Metrics are not getting collected. 
", e.getMessage()); + } + } + + @Test + public void testFailureNoScheduledId() throws Exception { + MetricCollector.getInstance().deinit(); + conf = new HiveConf(); + MetricCollector.getInstance().init(conf); + ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db", + "staging", conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + bootstrapDumpMetricCollector.reportStageStart("dump", metricMap); + bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS); + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + } + + @Test + public void testFailureNoPolicyId() throws Exception { + MetricCollector.getInstance().deinit(); + conf = new HiveConf(); + MetricCollector.getInstance().init(conf); + ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db", + "staging", conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + bootstrapDumpMetricCollector.reportStageStart("dump", metricMap); + bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS); + Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size()); + } + + @Test + public void testSuccessBootstrapDumpMetrics() throws Exception { + ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db", + "staging", conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + bootstrapDumpMetricCollector.reportStageStart("dump", metricMap); + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1); + List actualMetrics = MetricCollector.getInstance().getMetrics(); + 
Assert.assertEquals(1, actualMetrics.size()); + + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2); + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10); + bootstrapDumpMetricCollector.reportEnd(Status.SUCCESS); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "staging"); + expectedMetadata.setLastReplId(10); + Progress expectedProgress = new Progress(); + expectedProgress.setStatus(Status.SUCCESS); + Stage dumpStage = new Stage("dump", Status.SUCCESS, 0); + dumpStage.setEndTime(0); + Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10); + expectedTableMetric.setCurrentCount(3); + Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1); + expectedFuncMetric.setCurrentCount(1); + dumpStage.addMetric(expectedTableMetric); + dumpStage.addMetric(expectedFuncMetric); + expectedProgress.addStage(dumpStage); + ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata); + expectedMetric.setProgress(expectedProgress); + checkSuccess(actualMetrics.get(0), expectedMetric, "dump", + Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name())); + } + + @Test + public void testSuccessIncrDumpMetrics() throws Exception { + ReplicationMetricCollector incrDumpMetricCollector = new IncrementalDumpMetricCollector("db", + "staging", conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + incrDumpMetricCollector.reportStageStart("dump", 
metricMap); + incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1); + List actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2); + incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + incrDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10); + incrDumpMetricCollector.reportEnd(Status.SUCCESS); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "staging"); + expectedMetadata.setLastReplId(10); + Progress expectedProgress = new Progress(); + expectedProgress.setStatus(Status.SUCCESS); + Stage dumpStage = new Stage("dump", Status.SUCCESS, 0); + dumpStage.setEndTime(0); + Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10); + expectedTableMetric.setCurrentCount(3); + Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1); + expectedFuncMetric.setCurrentCount(1); + dumpStage.addMetric(expectedTableMetric); + dumpStage.addMetric(expectedFuncMetric); + expectedProgress.addStage(dumpStage); + ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, + expectedMetadata); + expectedMetric.setProgress(expectedProgress); + checkSuccess(actualMetrics.get(0), expectedMetric, "dump", + Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name())); + } + + @Test + public void testSuccessBootstrapLoadMetrics() throws Exception { + ReplicationMetricCollector bootstrapLoadMetricCollector = new BootstrapLoadMetricCollector("db", + "staging", 1, conf); + Map metricMap = new HashMap<>(); + 
metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + bootstrapLoadMetricCollector.reportStageStart("dump", metricMap); + bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1); + List actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2); + bootstrapLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + bootstrapLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10); + bootstrapLoadMetricCollector.reportEnd(Status.SUCCESS); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "staging"); + expectedMetadata.setLastReplId(10); + Progress expectedProgress = new Progress(); + expectedProgress.setStatus(Status.SUCCESS); + Stage dumpStage = new Stage("dump", Status.SUCCESS, 0); + dumpStage.setEndTime(0); + Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10); + expectedTableMetric.setCurrentCount(3); + Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1); + expectedFuncMetric.setCurrentCount(1); + dumpStage.addMetric(expectedTableMetric); + dumpStage.addMetric(expectedFuncMetric); + expectedProgress.addStage(dumpStage); + ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1, + expectedMetadata); + expectedMetric.setProgress(expectedProgress); + checkSuccess(actualMetrics.get(0), expectedMetric, "dump", + Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name())); + } + + @Test + public void 
testSuccessIncrLoadMetrics() throws Exception { + ReplicationMetricCollector incrLoadMetricCollector = new IncrementalLoadMetricCollector("db", + "staging", 1, conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + incrLoadMetricCollector.reportStageStart("dump", metricMap); + incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1); + List actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2); + incrLoadMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + incrLoadMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10); + incrLoadMetricCollector.reportEnd(Status.SUCCESS); + actualMetrics = MetricCollector.getInstance().getMetrics(); + Assert.assertEquals(1, actualMetrics.size()); + + Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.INCREMENTAL, "staging"); + expectedMetadata.setLastReplId(10); + Progress expectedProgress = new Progress(); + expectedProgress.setStatus(Status.SUCCESS); + Stage dumpStage = new Stage("dump", Status.SUCCESS, 0); + dumpStage.setEndTime(0); + Metric expectedTableMetric = new Metric(ReplUtils.MetricName.TABLES.name(), 10); + expectedTableMetric.setCurrentCount(3); + Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1); + expectedFuncMetric.setCurrentCount(1); + dumpStage.addMetric(expectedTableMetric); + dumpStage.addMetric(expectedFuncMetric); + expectedProgress.addStage(dumpStage); + ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 1, + expectedMetadata); + expectedMetric.setProgress(expectedProgress); + 
checkSuccess(actualMetrics.get(0), expectedMetric, "dump", + Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name())); + } + + private void checkSuccess(ReplicationMetric actual, ReplicationMetric expected, String stageName, + List metricNames) { + Assert.assertEquals(expected.getDumpExecutionId(), actual.getDumpExecutionId()); + Assert.assertEquals(expected.getPolicy(), actual.getPolicy()); + Assert.assertEquals(expected.getScheduledExecutionId(), actual.getScheduledExecutionId()); + Assert.assertEquals(expected.getMetadata().getReplicationType(), actual.getMetadata().getReplicationType()); + Assert.assertEquals(expected.getMetadata().getDbName(), actual.getMetadata().getDbName()); + Assert.assertEquals(expected.getMetadata().getStagingDir(), actual.getMetadata().getStagingDir()); + Assert.assertEquals(expected.getMetadata().getLastReplId(), actual.getMetadata().getLastReplId()); + Assert.assertEquals(expected.getProgress().getStatus(), actual.getProgress().getStatus()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getStatus(), + actual.getProgress().getStageByName(stageName).getStatus()); + for (String metricName : metricNames) { + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount(), + actual.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName) + .getCurrentCount(), actual.getProgress() + .getStageByName(stageName).getMetricByName(metricName).getCurrentCount()); + } + } + +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java new file mode 100644 index 0000000000..55d23be137 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java 
@@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse.repl.metric; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.ReplicationMetrics; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; +import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import 
org.mockito.junit.MockitoJUnitRunner; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.REPL_METRICS_UPDATE_FREQUENCY; + +/** + * Unit Test class for In Memory Replication Metric Collection. + */ +@RunWith(MockitoJUnitRunner.class) +public class TestReplicationMetricSink { + + HiveConf conf; + + @Before + public void setup() throws Exception { + conf = new HiveConf(); + conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl"); + conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1"); + MetricSink metricSinkSpy = Mockito.spy(MetricSink.getInstance()); + Mockito.doReturn(1L).when(metricSinkSpy).getFrequencyInSecs(); + metricSinkSpy.init(conf); + } + + @Test + public void testSuccessBootstrapDumpMetrics() throws Exception { + ReplicationMetricCollector bootstrapDumpMetricCollector = new BootstrapDumpMetricCollector("db", + "staging", conf); + Map metricMap = new HashMap<>(); + metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10); + metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1); + bootstrapDumpMetricCollector.reportStageStart("dump", metricMap); + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 1); + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.TABLES.name(), 2); + bootstrapDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.FUNCTIONS.name(), 1); + bootstrapDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10); + bootstrapDumpMetricCollector.reportEnd(Status.SUCCESS); + + Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.BOOTSTRAP, "staging"); + expectedMetadata.setLastReplId(10); + Progress expectedProgress = new Progress(); + expectedProgress.setStatus(Status.SUCCESS); + Stage dumpStage = new Stage("dump", Status.SUCCESS, 0); + dumpStage.setEndTime(0); + Metric expectedTableMetric = new 
Metric(ReplUtils.MetricName.TABLES.name(), 10); + expectedTableMetric.setCurrentCount(3); + Metric expectedFuncMetric = new Metric(ReplUtils.MetricName.FUNCTIONS.name(), 1); + expectedFuncMetric.setCurrentCount(1); + dumpStage.addMetric(expectedTableMetric); + dumpStage.addMetric(expectedFuncMetric); + expectedProgress.addStage(dumpStage); + ReplicationMetric expectedMetric = new ReplicationMetric(1, "repl", 0, expectedMetadata); + expectedMetric.setProgress(expectedProgress); + Thread.sleep(1000 * 20); + GetReplicationMetricsRequest metricsRequest = new GetReplicationMetricsRequest(); + metricsRequest.setPolicy("repl"); + ReplicationMetricList actualReplicationMetrics = Hive.get(conf).getMSC().getReplicationMetrics(metricsRequest); + ReplicationMetrics actualThriftMetric = actualReplicationMetrics.getReplicationMetricList().get(0); + ObjectMapper mapper = new ObjectMapper(); + ReplicationMetric actualMetric = new ReplicationMetric(actualThriftMetric.getScheduledExecutionId(), + actualThriftMetric.getPolicy(), actualThriftMetric.getDumpExecutionId(), + mapper.readValue(actualThriftMetric.getMetadata(), Metadata.class)); + actualMetric.setProgress(mapper.readValue(actualThriftMetric.getProgress(), Progress.class)); + checkSuccess(actualMetric, expectedMetric, "dump", + Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name())); + } + + private void checkSuccess(ReplicationMetric actual, ReplicationMetric expected, String stageName, + List metricNames) { + Assert.assertEquals(expected.getDumpExecutionId(), actual.getDumpExecutionId()); + Assert.assertEquals(expected.getPolicy(), actual.getPolicy()); + Assert.assertEquals(expected.getScheduledExecutionId(), actual.getScheduledExecutionId()); + Assert.assertEquals(expected.getMetadata().getReplicationType(), actual.getMetadata().getReplicationType()); + Assert.assertEquals(expected.getMetadata().getDbName(), actual.getMetadata().getDbName()); + 
Assert.assertEquals(expected.getMetadata().getStagingDir(), actual.getMetadata().getStagingDir()); + Assert.assertEquals(expected.getMetadata().getLastReplId(), actual.getMetadata().getLastReplId()); + Assert.assertEquals(expected.getProgress().getStatus(), actual.getProgress().getStatus()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getStatus(), + actual.getProgress().getStageByName(stageName).getStatus()); + for (String metricName : metricNames) { + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount(), + actual.getProgress().getStageByName(stageName).getMetricByName(metricName).getTotalCount()); + Assert.assertEquals(expected.getProgress().getStageByName(stageName).getMetricByName(metricName) + .getCurrentCount(), actual.getProgress() + .getStageByName(stageName).getMetricByName(metricName).getCurrentCount()); + } + } + +} diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java index d600f3a1fb..4ac7bd2d2f 100644 --- a/service/src/java/org/apache/hive/service/server/HiveServer2.java +++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.events.NotificationEventPoll; import org.apache.hadoop.hive.ql.parse.CalcitePlanner; +import org.apache.hadoop.hive.ql.parse.repl.metric.MetricSink; import org.apache.hadoop.hive.ql.plan.mapper.StatsSources; import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService; import org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider; @@ -902,6 +903,8 @@ public synchronized void stop() { LOG.error("Error stopping schq", e); } } + //Shutdown metric collection + MetricSink.getInstance().tearDown(); if (hs2HARegistry != null) { hs2HARegistry.stop(); shutdownExecutor(leaderActionsExecutorService); 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetReplicationMetricsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetReplicationMetricsRequest.java new file mode 100644 index 0000000000..8d63488679 --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetReplicationMetricsRequest.java @@ -0,0 +1,595 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetReplicationMetricsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static 
final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetReplicationMetricsRequest"); + + private static final org.apache.thrift.protocol.TField SCHEDULED_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduledExecutionId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("policy", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField DUMP_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("dumpExecutionId", org.apache.thrift.protocol.TType.I64, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetReplicationMetricsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetReplicationMetricsRequestTupleSchemeFactory()); + } + + private long scheduledExecutionId; // optional + private String policy; // optional + private long dumpExecutionId; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SCHEDULED_EXECUTION_ID((short)1, "scheduledExecutionId"), + POLICY((short)2, "policy"), + DUMP_EXECUTION_ID((short)3, "dumpExecutionId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // SCHEDULED_EXECUTION_ID + return SCHEDULED_EXECUTION_ID; + case 2: // POLICY + return POLICY; + case 3: // DUMP_EXECUTION_ID + return DUMP_EXECUTION_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SCHEDULEDEXECUTIONID_ISSET_ID = 0; + private static final int __DUMPEXECUTIONID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.SCHEDULED_EXECUTION_ID,_Fields.POLICY,_Fields.DUMP_EXECUTION_ID}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SCHEDULED_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("scheduledExecutionId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.POLICY, new org.apache.thrift.meta_data.FieldMetaData("policy", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DUMP_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("dumpExecutionId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetReplicationMetricsRequest.class, metaDataMap); + } + + public GetReplicationMetricsRequest() { + } + + /** + * Performs a deep copy on other. + */ + public GetReplicationMetricsRequest(GetReplicationMetricsRequest other) { + __isset_bitfield = other.__isset_bitfield; + this.scheduledExecutionId = other.scheduledExecutionId; + if (other.isSetPolicy()) { + this.policy = other.policy; + } + this.dumpExecutionId = other.dumpExecutionId; + } + + public GetReplicationMetricsRequest deepCopy() { + return new GetReplicationMetricsRequest(this); + } + + @Override + public void clear() { + setScheduledExecutionIdIsSet(false); + this.scheduledExecutionId = 0; + this.policy = null; + setDumpExecutionIdIsSet(false); + this.dumpExecutionId = 0; + } + + public long getScheduledExecutionId() { + return this.scheduledExecutionId; + } + + public void setScheduledExecutionId(long scheduledExecutionId) { + this.scheduledExecutionId = scheduledExecutionId; + setScheduledExecutionIdIsSet(true); + } + + public void unsetScheduledExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + /** Returns true if field scheduledExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetScheduledExecutionId() { + return EncodingUtils.testBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + public void setScheduledExecutionIdIsSet(boolean value) { + __isset_bitfield = 
EncodingUtils.setBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID, value); + } + + public String getPolicy() { + return this.policy; + } + + public void setPolicy(String policy) { + this.policy = policy; + } + + public void unsetPolicy() { + this.policy = null; + } + + /** Returns true if field policy is set (has been assigned a value) and false otherwise */ + public boolean isSetPolicy() { + return this.policy != null; + } + + public void setPolicyIsSet(boolean value) { + if (!value) { + this.policy = null; + } + } + + public long getDumpExecutionId() { + return this.dumpExecutionId; + } + + public void setDumpExecutionId(long dumpExecutionId) { + this.dumpExecutionId = dumpExecutionId; + setDumpExecutionIdIsSet(true); + } + + public void unsetDumpExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + /** Returns true if field dumpExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetDumpExecutionId() { + return EncodingUtils.testBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + public void setDumpExecutionIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + if (value == null) { + unsetScheduledExecutionId(); + } else { + setScheduledExecutionId((Long)value); + } + break; + + case POLICY: + if (value == null) { + unsetPolicy(); + } else { + setPolicy((String)value); + } + break; + + case DUMP_EXECUTION_ID: + if (value == null) { + unsetDumpExecutionId(); + } else { + setDumpExecutionId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + return getScheduledExecutionId(); + + case POLICY: + return getPolicy(); + + case DUMP_EXECUTION_ID: + return getDumpExecutionId(); + + } + throw new 
IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SCHEDULED_EXECUTION_ID: + return isSetScheduledExecutionId(); + case POLICY: + return isSetPolicy(); + case DUMP_EXECUTION_ID: + return isSetDumpExecutionId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetReplicationMetricsRequest) + return this.equals((GetReplicationMetricsRequest)that); + return false; + } + + public boolean equals(GetReplicationMetricsRequest that) { + if (that == null) + return false; + + boolean this_present_scheduledExecutionId = true && this.isSetScheduledExecutionId(); + boolean that_present_scheduledExecutionId = true && that.isSetScheduledExecutionId(); + if (this_present_scheduledExecutionId || that_present_scheduledExecutionId) { + if (!(this_present_scheduledExecutionId && that_present_scheduledExecutionId)) + return false; + if (this.scheduledExecutionId != that.scheduledExecutionId) + return false; + } + + boolean this_present_policy = true && this.isSetPolicy(); + boolean that_present_policy = true && that.isSetPolicy(); + if (this_present_policy || that_present_policy) { + if (!(this_present_policy && that_present_policy)) + return false; + if (!this.policy.equals(that.policy)) + return false; + } + + boolean this_present_dumpExecutionId = true && this.isSetDumpExecutionId(); + boolean that_present_dumpExecutionId = true && that.isSetDumpExecutionId(); + if (this_present_dumpExecutionId || that_present_dumpExecutionId) { + if (!(this_present_dumpExecutionId && that_present_dumpExecutionId)) + return false; + if (this.dumpExecutionId != that.dumpExecutionId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list 
= new ArrayList(); + + boolean present_scheduledExecutionId = true && (isSetScheduledExecutionId()); + list.add(present_scheduledExecutionId); + if (present_scheduledExecutionId) + list.add(scheduledExecutionId); + + boolean present_policy = true && (isSetPolicy()); + list.add(present_policy); + if (present_policy) + list.add(policy); + + boolean present_dumpExecutionId = true && (isSetDumpExecutionId()); + list.add(present_dumpExecutionId); + if (present_dumpExecutionId) + list.add(dumpExecutionId); + + return list.hashCode(); + } + + @Override + public int compareTo(GetReplicationMetricsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetScheduledExecutionId()).compareTo(other.isSetScheduledExecutionId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetScheduledExecutionId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduledExecutionId, other.scheduledExecutionId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPolicy()).compareTo(other.isSetPolicy()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPolicy()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.policy, other.policy); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDumpExecutionId()).compareTo(other.isSetDumpExecutionId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDumpExecutionId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dumpExecutionId, other.dumpExecutionId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws 
org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetReplicationMetricsRequest("); + boolean first = true; + + if (isSetScheduledExecutionId()) { + sb.append("scheduledExecutionId:"); + sb.append(this.scheduledExecutionId); + first = false; + } + if (isSetPolicy()) { + if (!first) sb.append(", "); + sb.append("policy:"); + if (this.policy == null) { + sb.append("null"); + } else { + sb.append(this.policy); + } + first = false; + } + if (isSetDumpExecutionId()) { + if (!first) sb.append(", "); + sb.append("dumpExecutionId:"); + sb.append(this.dumpExecutionId); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetReplicationMetricsRequestStandardSchemeFactory implements SchemeFactory { + public GetReplicationMetricsRequestStandardScheme getScheme() { + return new GetReplicationMetricsRequestStandardScheme(); + } + } + + private static class GetReplicationMetricsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetReplicationMetricsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // SCHEDULED_EXECUTION_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.scheduledExecutionId = iprot.readI64(); + struct.setScheduledExecutionIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // POLICY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.policy = iprot.readString(); + struct.setPolicyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // DUMP_EXECUTION_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.dumpExecutionId = iprot.readI64(); + struct.setDumpExecutionIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol 
oprot, GetReplicationMetricsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetScheduledExecutionId()) { + oprot.writeFieldBegin(SCHEDULED_EXECUTION_ID_FIELD_DESC); + oprot.writeI64(struct.scheduledExecutionId); + oprot.writeFieldEnd(); + } + if (struct.policy != null) { + if (struct.isSetPolicy()) { + oprot.writeFieldBegin(POLICY_FIELD_DESC); + oprot.writeString(struct.policy); + oprot.writeFieldEnd(); + } + } + if (struct.isSetDumpExecutionId()) { + oprot.writeFieldBegin(DUMP_EXECUTION_ID_FIELD_DESC); + oprot.writeI64(struct.dumpExecutionId); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetReplicationMetricsRequestTupleSchemeFactory implements SchemeFactory { + public GetReplicationMetricsRequestTupleScheme getScheme() { + return new GetReplicationMetricsRequestTupleScheme(); + } + } + + private static class GetReplicationMetricsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetReplicationMetricsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetScheduledExecutionId()) { + optionals.set(0); + } + if (struct.isSetPolicy()) { + optionals.set(1); + } + if (struct.isSetDumpExecutionId()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetScheduledExecutionId()) { + oprot.writeI64(struct.scheduledExecutionId); + } + if (struct.isSetPolicy()) { + oprot.writeString(struct.policy); + } + if (struct.isSetDumpExecutionId()) { + oprot.writeI64(struct.dumpExecutionId); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetReplicationMetricsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if 
(incoming.get(0)) { + struct.scheduledExecutionId = iprot.readI64(); + struct.setScheduledExecutionIdIsSet(true); + } + if (incoming.get(1)) { + struct.policy = iprot.readString(); + struct.setPolicyIsSet(true); + } + if (incoming.get(2)) { + struct.dumpExecutionId = iprot.readI64(); + struct.setDumpExecutionIdIsSet(true); + } + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java new file mode 100644 index 0000000000..e247be530c --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ReplicationMetricList implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ReplicationMetricList"); + + private static final org.apache.thrift.protocol.TField REPLICATION_METRIC_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("replicationMetricList", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ReplicationMetricListStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ReplicationMetricListTupleSchemeFactory()); + } + + private List replicationMetricList; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REPLICATION_METRIC_LIST((short)1, "replicationMetricList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REPLICATION_METRIC_LIST + return REPLICATION_METRIC_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REPLICATION_METRIC_LIST, new org.apache.thrift.meta_data.FieldMetaData("replicationMetricList", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplicationMetrics.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplicationMetricList.class, metaDataMap); + } + + public ReplicationMetricList() { + } + + public ReplicationMetricList( + List replicationMetricList) + { + this(); + this.replicationMetricList = replicationMetricList; + } + + /** + * Performs a deep copy on other. 
+ */ + public ReplicationMetricList(ReplicationMetricList other) { + if (other.isSetReplicationMetricList()) { + List __this__replicationMetricList = new ArrayList(other.replicationMetricList.size()); + for (ReplicationMetrics other_element : other.replicationMetricList) { + __this__replicationMetricList.add(new ReplicationMetrics(other_element)); + } + this.replicationMetricList = __this__replicationMetricList; + } + } + + public ReplicationMetricList deepCopy() { + return new ReplicationMetricList(this); + } + + @Override + public void clear() { + this.replicationMetricList = null; + } + + public int getReplicationMetricListSize() { + return (this.replicationMetricList == null) ? 0 : this.replicationMetricList.size(); + } + + public java.util.Iterator getReplicationMetricListIterator() { + return (this.replicationMetricList == null) ? null : this.replicationMetricList.iterator(); + } + + public void addToReplicationMetricList(ReplicationMetrics elem) { + if (this.replicationMetricList == null) { + this.replicationMetricList = new ArrayList(); + } + this.replicationMetricList.add(elem); + } + + public List getReplicationMetricList() { + return this.replicationMetricList; + } + + public void setReplicationMetricList(List replicationMetricList) { + this.replicationMetricList = replicationMetricList; + } + + public void unsetReplicationMetricList() { + this.replicationMetricList = null; + } + + /** Returns true if field replicationMetricList is set (has been assigned a value) and false otherwise */ + public boolean isSetReplicationMetricList() { + return this.replicationMetricList != null; + } + + public void setReplicationMetricListIsSet(boolean value) { + if (!value) { + this.replicationMetricList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REPLICATION_METRIC_LIST: + if (value == null) { + unsetReplicationMetricList(); + } else { + setReplicationMetricList((List)value); + } + break; + + } + } + + public 
Object getFieldValue(_Fields field) { + switch (field) { + case REPLICATION_METRIC_LIST: + return getReplicationMetricList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REPLICATION_METRIC_LIST: + return isSetReplicationMetricList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ReplicationMetricList) + return this.equals((ReplicationMetricList)that); + return false; + } + + public boolean equals(ReplicationMetricList that) { + if (that == null) + return false; + + boolean this_present_replicationMetricList = true && this.isSetReplicationMetricList(); + boolean that_present_replicationMetricList = true && that.isSetReplicationMetricList(); + if (this_present_replicationMetricList || that_present_replicationMetricList) { + if (!(this_present_replicationMetricList && that_present_replicationMetricList)) + return false; + if (!this.replicationMetricList.equals(that.replicationMetricList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_replicationMetricList = true && (isSetReplicationMetricList()); + list.add(present_replicationMetricList); + if (present_replicationMetricList) + list.add(replicationMetricList); + + return list.hashCode(); + } + + @Override + public int compareTo(ReplicationMetricList other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReplicationMetricList()).compareTo(other.isSetReplicationMetricList()); + if (lastComparison != 0) { + return lastComparison; + } + if 
(isSetReplicationMetricList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replicationMetricList, other.replicationMetricList); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ReplicationMetricList("); + boolean first = true; + + sb.append("replicationMetricList:"); + if (this.replicationMetricList == null) { + sb.append("null"); + } else { + sb.append(this.replicationMetricList); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetReplicationMetricList()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'replicationMetricList' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ReplicationMetricListStandardSchemeFactory implements SchemeFactory { + public ReplicationMetricListStandardScheme getScheme() { + return new ReplicationMetricListStandardScheme(); + } + } + + private static class ReplicationMetricListStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ReplicationMetricList struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REPLICATION_METRIC_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.replicationMetricList = new ArrayList(_list1280.size); + ReplicationMetrics _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + { + _elem1281 = new ReplicationMetrics(); + _elem1281.read(iprot); + struct.replicationMetricList.add(_elem1281); + } + iprot.readListEnd(); + } + struct.setReplicationMetricListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + 
default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetricList struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.replicationMetricList != null) { + oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.replicationMetricList.size())); + for (ReplicationMetrics _iter1283 : struct.replicationMetricList) + { + _iter1283.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ReplicationMetricListTupleSchemeFactory implements SchemeFactory { + public ReplicationMetricListTupleScheme getScheme() { + return new ReplicationMetricListTupleScheme(); + } + } + + private static class ReplicationMetricListTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricList struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.replicationMetricList.size()); + for (ReplicationMetrics _iter1284 : struct.replicationMetricList) + { + _iter1284.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricList struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.replicationMetricList = new ArrayList(_list1285.size); + ReplicationMetrics _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + { + _elem1286 = new 
ReplicationMetrics(); + _elem1286.read(iprot); + struct.replicationMetricList.add(_elem1286); + } + } + struct.setReplicationMetricListIsSet(true); + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java new file mode 100644 index 0000000000..5ee5cac7ca --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetrics.java @@ -0,0 +1,799 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public 
class ReplicationMetrics implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ReplicationMetrics"); + + private static final org.apache.thrift.protocol.TField SCHEDULED_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduledExecutionId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("policy", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField DUMP_EXECUTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("dumpExecutionId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField PROGRESS_FIELD_DESC = new org.apache.thrift.protocol.TField("progress", org.apache.thrift.protocol.TType.STRING, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ReplicationMetricsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ReplicationMetricsTupleSchemeFactory()); + } + + private long scheduledExecutionId; // required + private String policy; // required + private long dumpExecutionId; // required + private String metadata; // optional + private String progress; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SCHEDULED_EXECUTION_ID((short)1, "scheduledExecutionId"), + POLICY((short)2, "policy"), + DUMP_EXECUTION_ID((short)3, "dumpExecutionId"), + METADATA((short)4, "metadata"), + PROGRESS((short)5, "progress"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // SCHEDULED_EXECUTION_ID + return SCHEDULED_EXECUTION_ID; + case 2: // POLICY + return POLICY; + case 3: // DUMP_EXECUTION_ID + return DUMP_EXECUTION_ID; + case 4: // METADATA + return METADATA; + case 5: // PROGRESS + return PROGRESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SCHEDULEDEXECUTIONID_ISSET_ID = 0; + private static final int __DUMPEXECUTIONID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.METADATA,_Fields.PROGRESS}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SCHEDULED_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("scheduledExecutionId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.POLICY, new org.apache.thrift.meta_data.FieldMetaData("policy", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DUMP_EXECUTION_ID, new org.apache.thrift.meta_data.FieldMetaData("dumpExecutionId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PROGRESS, new org.apache.thrift.meta_data.FieldMetaData("progress", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplicationMetrics.class, metaDataMap); + } + + public ReplicationMetrics() { + } + + public ReplicationMetrics( + long scheduledExecutionId, + String policy, + long dumpExecutionId) + { + this(); + this.scheduledExecutionId = scheduledExecutionId; + setScheduledExecutionIdIsSet(true); + this.policy = policy; + this.dumpExecutionId = dumpExecutionId; + setDumpExecutionIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public ReplicationMetrics(ReplicationMetrics other) { + __isset_bitfield = other.__isset_bitfield; + this.scheduledExecutionId = other.scheduledExecutionId; + if (other.isSetPolicy()) { + this.policy = other.policy; + } + this.dumpExecutionId = other.dumpExecutionId; + if (other.isSetMetadata()) { + this.metadata = other.metadata; + } + if (other.isSetProgress()) { + this.progress = other.progress; + } + } + + public ReplicationMetrics deepCopy() { + return new ReplicationMetrics(this); + } + + @Override + public void clear() { + setScheduledExecutionIdIsSet(false); + this.scheduledExecutionId = 0; + this.policy = null; + setDumpExecutionIdIsSet(false); + this.dumpExecutionId = 0; + this.metadata = null; + this.progress = null; + } + + public long getScheduledExecutionId() { + return this.scheduledExecutionId; + } + + public void setScheduledExecutionId(long scheduledExecutionId) { + this.scheduledExecutionId = scheduledExecutionId; + setScheduledExecutionIdIsSet(true); + } + + public void unsetScheduledExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + /** Returns true if field scheduledExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetScheduledExecutionId() { + 
return EncodingUtils.testBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID); + } + + public void setScheduledExecutionIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SCHEDULEDEXECUTIONID_ISSET_ID, value); + } + + public String getPolicy() { + return this.policy; + } + + public void setPolicy(String policy) { + this.policy = policy; + } + + public void unsetPolicy() { + this.policy = null; + } + + /** Returns true if field policy is set (has been assigned a value) and false otherwise */ + public boolean isSetPolicy() { + return this.policy != null; + } + + public void setPolicyIsSet(boolean value) { + if (!value) { + this.policy = null; + } + } + + public long getDumpExecutionId() { + return this.dumpExecutionId; + } + + public void setDumpExecutionId(long dumpExecutionId) { + this.dumpExecutionId = dumpExecutionId; + setDumpExecutionIdIsSet(true); + } + + public void unsetDumpExecutionId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + /** Returns true if field dumpExecutionId is set (has been assigned a value) and false otherwise */ + public boolean isSetDumpExecutionId() { + return EncodingUtils.testBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID); + } + + public void setDumpExecutionIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DUMPEXECUTIONID_ISSET_ID, value); + } + + public String getMetadata() { + return this.metadata; + } + + public void setMetadata(String metadata) { + this.metadata = metadata; + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public String getProgress() { + return this.progress; + } + + public void setProgress(String 
progress) { + this.progress = progress; + } + + public void unsetProgress() { + this.progress = null; + } + + /** Returns true if field progress is set (has been assigned a value) and false otherwise */ + public boolean isSetProgress() { + return this.progress != null; + } + + public void setProgressIsSet(boolean value) { + if (!value) { + this.progress = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + if (value == null) { + unsetScheduledExecutionId(); + } else { + setScheduledExecutionId((Long)value); + } + break; + + case POLICY: + if (value == null) { + unsetPolicy(); + } else { + setPolicy((String)value); + } + break; + + case DUMP_EXECUTION_ID: + if (value == null) { + unsetDumpExecutionId(); + } else { + setDumpExecutionId((Long)value); + } + break; + + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((String)value); + } + break; + + case PROGRESS: + if (value == null) { + unsetProgress(); + } else { + setProgress((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SCHEDULED_EXECUTION_ID: + return getScheduledExecutionId(); + + case POLICY: + return getPolicy(); + + case DUMP_EXECUTION_ID: + return getDumpExecutionId(); + + case METADATA: + return getMetadata(); + + case PROGRESS: + return getProgress(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SCHEDULED_EXECUTION_ID: + return isSetScheduledExecutionId(); + case POLICY: + return isSetPolicy(); + case DUMP_EXECUTION_ID: + return isSetDumpExecutionId(); + case METADATA: + return isSetMetadata(); + case PROGRESS: + return isSetProgress(); + } + throw new IllegalStateException(); + } + + 
@Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ReplicationMetrics) + return this.equals((ReplicationMetrics)that); + return false; + } + + public boolean equals(ReplicationMetrics that) { + if (that == null) + return false; + + boolean this_present_scheduledExecutionId = true; + boolean that_present_scheduledExecutionId = true; + if (this_present_scheduledExecutionId || that_present_scheduledExecutionId) { + if (!(this_present_scheduledExecutionId && that_present_scheduledExecutionId)) + return false; + if (this.scheduledExecutionId != that.scheduledExecutionId) + return false; + } + + boolean this_present_policy = true && this.isSetPolicy(); + boolean that_present_policy = true && that.isSetPolicy(); + if (this_present_policy || that_present_policy) { + if (!(this_present_policy && that_present_policy)) + return false; + if (!this.policy.equals(that.policy)) + return false; + } + + boolean this_present_dumpExecutionId = true; + boolean that_present_dumpExecutionId = true; + if (this_present_dumpExecutionId || that_present_dumpExecutionId) { + if (!(this_present_dumpExecutionId && that_present_dumpExecutionId)) + return false; + if (this.dumpExecutionId != that.dumpExecutionId) + return false; + } + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + boolean this_present_progress = true && this.isSetProgress(); + boolean that_present_progress = true && that.isSetProgress(); + if (this_present_progress || that_present_progress) { + if (!(this_present_progress && that_present_progress)) + return false; + if (!this.progress.equals(that.progress)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = 
new ArrayList(); + + boolean present_scheduledExecutionId = true; + list.add(present_scheduledExecutionId); + if (present_scheduledExecutionId) + list.add(scheduledExecutionId); + + boolean present_policy = true && (isSetPolicy()); + list.add(present_policy); + if (present_policy) + list.add(policy); + + boolean present_dumpExecutionId = true; + list.add(present_dumpExecutionId); + if (present_dumpExecutionId) + list.add(dumpExecutionId); + + boolean present_metadata = true && (isSetMetadata()); + list.add(present_metadata); + if (present_metadata) + list.add(metadata); + + boolean present_progress = true && (isSetProgress()); + list.add(present_progress); + if (present_progress) + list.add(progress); + + return list.hashCode(); + } + + @Override + public int compareTo(ReplicationMetrics other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetScheduledExecutionId()).compareTo(other.isSetScheduledExecutionId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetScheduledExecutionId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduledExecutionId, other.scheduledExecutionId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPolicy()).compareTo(other.isSetPolicy()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPolicy()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.policy, other.policy); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDumpExecutionId()).compareTo(other.isSetDumpExecutionId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDumpExecutionId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dumpExecutionId, other.dumpExecutionId); + if (lastComparison != 0) { + return lastComparison; 
+ } + } + lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMetadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetProgress()).compareTo(other.isSetProgress()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProgress()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.progress, other.progress); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ReplicationMetrics("); + boolean first = true; + + sb.append("scheduledExecutionId:"); + sb.append(this.scheduledExecutionId); + first = false; + if (!first) sb.append(", "); + sb.append("policy:"); + if (this.policy == null) { + sb.append("null"); + } else { + sb.append(this.policy); + } + first = false; + if (!first) sb.append(", "); + sb.append("dumpExecutionId:"); + sb.append(this.dumpExecutionId); + first = false; + if (isSetMetadata()) { + if (!first) sb.append(", "); + sb.append("metadata:"); + if (this.metadata == null) { + sb.append("null"); + } else { + sb.append(this.metadata); + } + first = false; + } + if (isSetProgress()) { + if (!first) sb.append(", "); + sb.append("progress:"); + if (this.progress == null) { + sb.append("null"); + } else { + sb.append(this.progress); + } + first = 
false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetScheduledExecutionId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'scheduledExecutionId' is unset! Struct:" + toString()); + } + + if (!isSetPolicy()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'policy' is unset! Struct:" + toString()); + } + + if (!isSetDumpExecutionId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dumpExecutionId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ReplicationMetricsStandardSchemeFactory implements SchemeFactory { + public ReplicationMetricsStandardScheme getScheme() { + return new ReplicationMetricsStandardScheme(); + } + } + + private static class ReplicationMetricsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ReplicationMetrics struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // SCHEDULED_EXECUTION_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.scheduledExecutionId = iprot.readI64(); + struct.setScheduledExecutionIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // POLICY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.policy = iprot.readString(); + struct.setPolicyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // DUMP_EXECUTION_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.dumpExecutionId = iprot.readI64(); + struct.setDumpExecutionIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.metadata = iprot.readString(); + struct.setMetadataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // 
PROGRESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.progress = iprot.readString(); + struct.setProgressIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetrics struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(SCHEDULED_EXECUTION_ID_FIELD_DESC); + oprot.writeI64(struct.scheduledExecutionId); + oprot.writeFieldEnd(); + if (struct.policy != null) { + oprot.writeFieldBegin(POLICY_FIELD_DESC); + oprot.writeString(struct.policy); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DUMP_EXECUTION_ID_FIELD_DESC); + oprot.writeI64(struct.dumpExecutionId); + oprot.writeFieldEnd(); + if (struct.metadata != null) { + if (struct.isSetMetadata()) { + oprot.writeFieldBegin(METADATA_FIELD_DESC); + oprot.writeString(struct.metadata); + oprot.writeFieldEnd(); + } + } + if (struct.progress != null) { + if (struct.isSetProgress()) { + oprot.writeFieldBegin(PROGRESS_FIELD_DESC); + oprot.writeString(struct.progress); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ReplicationMetricsTupleSchemeFactory implements SchemeFactory { + public ReplicationMetricsTupleScheme getScheme() { + return new ReplicationMetricsTupleScheme(); + } + } + + private static class ReplicationMetricsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetrics struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.scheduledExecutionId); + oprot.writeString(struct.policy); + 
oprot.writeI64(struct.dumpExecutionId); + BitSet optionals = new BitSet(); + if (struct.isSetMetadata()) { + optionals.set(0); + } + if (struct.isSetProgress()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetMetadata()) { + oprot.writeString(struct.metadata); + } + if (struct.isSetProgress()) { + oprot.writeString(struct.progress); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ReplicationMetrics struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.scheduledExecutionId = iprot.readI64(); + struct.setScheduledExecutionIdIsSet(true); + struct.policy = iprot.readString(); + struct.setPolicyIsSet(true); + struct.dumpExecutionId = iprot.readI64(); + struct.setDumpExecutionIdIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.metadata = iprot.readString(); + struct.setMetadataIsSet(true); + } + if (incoming.get(1)) { + struct.progress = iprot.readString(); + struct.setProgressIsSet(true); + } + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 7475d5d00f..e89c15546b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -520,6 +520,10 @@ public ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException, org.apache.thrift.TException; + + public ReplicationMetricList 
get_replication_metrics(GetReplicationMetricsRequest rqst) throws MetaException, org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -1002,6 +1006,10 @@ public void get_scheduled_query(ScheduledQueryKey scheduleKey, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_replication_metrics(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_replication_metrics(GetReplicationMetricsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -7852,6 +7860,55 @@ public ScheduledQuery recv_get_scheduled_query() throws MetaException, NoSuchObj throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_scheduled_query failed: unknown result"); } + public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException, org.apache.thrift.TException + { + send_add_replication_metrics(replicationMetricList); + recv_add_replication_metrics(); + } + + public void send_add_replication_metrics(ReplicationMetricList replicationMetricList) throws org.apache.thrift.TException + { + add_replication_metrics_args args = new add_replication_metrics_args(); + args.setReplicationMetricList(replicationMetricList); + sendBase("add_replication_metrics", args); + } + + public void recv_add_replication_metrics() throws MetaException, org.apache.thrift.TException + { + 
add_replication_metrics_result result = new add_replication_metrics_result(); + receiveBase(result, "add_replication_metrics"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + + public ReplicationMetricList get_replication_metrics(GetReplicationMetricsRequest rqst) throws MetaException, org.apache.thrift.TException + { + send_get_replication_metrics(rqst); + return recv_get_replication_metrics(); + } + + public void send_get_replication_metrics(GetReplicationMetricsRequest rqst) throws org.apache.thrift.TException + { + get_replication_metrics_args args = new get_replication_metrics_args(); + args.setRqst(rqst); + sendBase("get_replication_metrics", args); + } + + public ReplicationMetricList recv_get_replication_metrics() throws MetaException, org.apache.thrift.TException + { + get_replication_metrics_result result = new get_replication_metrics_result(); + receiveBase(result, "get_replication_metrics"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_replication_metrics failed: unknown result"); + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -16019,6 +16076,70 @@ public ScheduledQuery getResult() throws MetaException, NoSuchObjectException, o } } + public void add_replication_metrics(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + add_replication_metrics_call method_call = 
new add_replication_metrics_call(replicationMetricList, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_call extends org.apache.thrift.async.TAsyncMethodCall { + private ReplicationMetricList replicationMetricList; + public add_replication_metrics_call(ReplicationMetricList replicationMetricList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.replicationMetricList = replicationMetricList; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_replication_metrics", org.apache.thrift.protocol.TMessageType.CALL, 0)); + add_replication_metrics_args args = new add_replication_metrics_args(); + args.setReplicationMetricList(replicationMetricList); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_add_replication_metrics(); + } + } + + public void get_replication_metrics(GetReplicationMetricsRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_replication_metrics_call method_call = new get_replication_metrics_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_replication_metrics_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetReplicationMetricsRequest rqst; + public get_replication_metrics_call(GetReplicationMetricsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_replication_metrics", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_replication_metrics_args args = new get_replication_metrics_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public ReplicationMetricList getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_replication_metrics(); + } + } + } 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor extends com.facebook.fb303.FacebookService.Processor implements org.apache.thrift.TProcessor { @@ -16271,6 +16392,8 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public add_replication_metrics() { + super("add_replication_metrics"); + } + + public add_replication_metrics_args getEmptyArgsInstance() { + return new add_replication_metrics_args(); + } + + protected boolean isOneway() { + return false; + } + + public add_replication_metrics_result getResult(I iface, add_replication_metrics_args args) throws org.apache.thrift.TException { + add_replication_metrics_result result = new add_replication_metrics_result(); + try { + iface.add_replication_metrics(args.replicationMetricList); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_replication_metrics extends org.apache.thrift.ProcessFunction { + public get_replication_metrics() { + super("get_replication_metrics"); + } + + public get_replication_metrics_args getEmptyArgsInstance() { + return new get_replication_metrics_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_replication_metrics_result getResult(I iface, get_replication_metrics_args args) throws org.apache.thrift.TException { + get_replication_metrics_result result = new get_replication_metrics_result(); + try { + result.success = iface.get_replication_metrics(args.rqst); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -22646,6 
+22817,8 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public add_replication_metrics() { + super("add_replication_metrics"); + } + + public add_replication_metrics_args getEmptyArgsInstance() { + return new add_replication_metrics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + add_replication_metrics_result result = new add_replication_metrics_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + add_replication_metrics_result result = new add_replication_metrics_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, add_replication_metrics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_replication_metrics(args.replicationMetricList,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
get_replication_metrics extends org.apache.thrift.AsyncProcessFunction { + public get_replication_metrics() { + super("get_replication_metrics"); + } + + public get_replication_metrics_args getEmptyArgsInstance() { + return new get_replication_metrics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(ReplicationMetricList o) { + get_replication_metrics_result result = new get_replication_metrics_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_replication_metrics_result result = new get_replication_metrics_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_replication_metrics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_replication_metrics(args.rqst,resultHandler); + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args 
implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { @@ -48048,13 +48334,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); - struct.success = new ArrayList(_list1280.size); - String _elem1281; - for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.success = new ArrayList(_list1288.size); + String _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1281 = iprot.readString(); - struct.success.add(_elem1281); + _elem1289 = iprot.readString(); + struct.success.add(_elem1289); } iprot.readListEnd(); } @@ -48089,9 +48375,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1283 : struct.success) + for (String _iter1291 : struct.success) { - oprot.writeString(_iter1283); + oprot.writeString(_iter1291); } oprot.writeListEnd(); } @@ -48130,9 +48416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1284 : struct.success) + for (String _iter1292 : struct.success) { - oprot.writeString(_iter1284); + oprot.writeString(_iter1292); } } } @@ -48147,13 +48433,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1285.size); - String _elem1286; - 
for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1293.size); + String _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1286 = iprot.readString(); - struct.success.add(_elem1286); + _elem1294 = iprot.readString(); + struct.success.add(_elem1294); } } struct.setSuccessIsSet(true); @@ -48807,13 +49093,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); - struct.success = new ArrayList(_list1288.size); - String _elem1289; - for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) + org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); + struct.success = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1289 = iprot.readString(); - struct.success.add(_elem1289); + _elem1297 = iprot.readString(); + struct.success.add(_elem1297); } iprot.readListEnd(); } @@ -48848,9 +49134,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1291 : struct.success) + for (String _iter1299 : struct.success) { - oprot.writeString(_iter1291); + oprot.writeString(_iter1299); } oprot.writeListEnd(); } @@ -48889,9 +49175,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1292 : struct.success) + for (String _iter1300 : struct.success) { - oprot.writeString(_iter1292); + 
oprot.writeString(_iter1300); } } } @@ -48906,13 +49192,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1293.size); - String _elem1294; - for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) + org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1301.size); + String _elem1302; + for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) { - _elem1294 = iprot.readString(); - struct.success.add(_elem1294); + _elem1302 = iprot.readString(); + struct.success.add(_elem1302); } } struct.setSuccessIsSet(true); @@ -53519,16 +53805,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1296 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1296.size); - String _key1297; - Type _val1298; - for (int _i1299 = 0; _i1299 < _map1296.size; ++_i1299) + org.apache.thrift.protocol.TMap _map1304 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1304.size); + String _key1305; + Type _val1306; + for (int _i1307 = 0; _i1307 < _map1304.size; ++_i1307) { - _key1297 = iprot.readString(); - _val1298 = new Type(); - _val1298.read(iprot); - struct.success.put(_key1297, _val1298); + _key1305 = iprot.readString(); + _val1306 = new Type(); + _val1306.read(iprot); + struct.success.put(_key1305, _val1306); } iprot.readMapEnd(); } @@ -53563,10 +53849,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1300 : struct.success.entrySet()) + for (Map.Entry _iter1308 : struct.success.entrySet()) { - oprot.writeString(_iter1300.getKey()); - _iter1300.getValue().write(oprot); + oprot.writeString(_iter1308.getKey()); + _iter1308.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -53605,10 +53891,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1301 : struct.success.entrySet()) + for (Map.Entry _iter1309 : struct.success.entrySet()) { - oprot.writeString(_iter1301.getKey()); - _iter1301.getValue().write(oprot); + oprot.writeString(_iter1309.getKey()); + _iter1309.getValue().write(oprot); } } } @@ -53623,16 +53909,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1302 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1302.size); - String _key1303; - Type _val1304; - for (int _i1305 = 0; _i1305 < _map1302.size; ++_i1305) + org.apache.thrift.protocol.TMap _map1310 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1310.size); + String _key1311; + Type _val1312; + for (int _i1313 = 0; _i1313 < _map1310.size; ++_i1313) { - _key1303 = iprot.readString(); - _val1304 = new Type(); - _val1304.read(iprot); - struct.success.put(_key1303, _val1304); + _key1311 = iprot.readString(); + _val1312 = new Type(); + _val1312.read(iprot); + struct.success.put(_key1311, _val1312); } } struct.setSuccessIsSet(true); @@ -54667,14 
+54953,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); - struct.success = new ArrayList(_list1306.size); - FieldSchema _elem1307; - for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) + org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); + struct.success = new ArrayList(_list1314.size); + FieldSchema _elem1315; + for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) { - _elem1307 = new FieldSchema(); - _elem1307.read(iprot); - struct.success.add(_elem1307); + _elem1315 = new FieldSchema(); + _elem1315.read(iprot); + struct.success.add(_elem1315); } iprot.readListEnd(); } @@ -54727,9 +55013,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1309 : struct.success) + for (FieldSchema _iter1317 : struct.success) { - _iter1309.write(oprot); + _iter1317.write(oprot); } oprot.writeListEnd(); } @@ -54784,9 +55070,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1310 : struct.success) + for (FieldSchema _iter1318 : struct.success) { - _iter1310.write(oprot); + _iter1318.write(oprot); } } } @@ -54807,14 +55093,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1311.size); - FieldSchema _elem1312; - for (int _i1313 = 0; _i1313 < 
_list1311.size; ++_i1313) + org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1319.size); + FieldSchema _elem1320; + for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) { - _elem1312 = new FieldSchema(); - _elem1312.read(iprot); - struct.success.add(_elem1312); + _elem1320 = new FieldSchema(); + _elem1320.read(iprot); + struct.success.add(_elem1320); } } struct.setSuccessIsSet(true); @@ -55968,14 +56254,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); - struct.success = new ArrayList(_list1314.size); - FieldSchema _elem1315; - for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) + org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); + struct.success = new ArrayList(_list1322.size); + FieldSchema _elem1323; + for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) { - _elem1315 = new FieldSchema(); - _elem1315.read(iprot); - struct.success.add(_elem1315); + _elem1323 = new FieldSchema(); + _elem1323.read(iprot); + struct.success.add(_elem1323); } iprot.readListEnd(); } @@ -56028,9 +56314,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1317 : struct.success) + for (FieldSchema _iter1325 : struct.success) { - _iter1317.write(oprot); + _iter1325.write(oprot); } oprot.writeListEnd(); } @@ -56085,9 +56371,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1318 : struct.success) + 
for (FieldSchema _iter1326 : struct.success) { - _iter1318.write(oprot); + _iter1326.write(oprot); } } } @@ -56108,14 +56394,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1319.size); - FieldSchema _elem1320; - for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) + org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1327.size); + FieldSchema _elem1328; + for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) { - _elem1320 = new FieldSchema(); - _elem1320.read(iprot); - struct.success.add(_elem1320); + _elem1328 = new FieldSchema(); + _elem1328.read(iprot); + struct.success.add(_elem1328); } } struct.setSuccessIsSet(true); @@ -58204,14 +58490,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); - struct.success = new ArrayList(_list1322.size); - FieldSchema _elem1323; - for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); + struct.success = new ArrayList(_list1330.size); + FieldSchema _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1323 = new FieldSchema(); - _elem1323.read(iprot); - struct.success.add(_elem1323); + _elem1331 = new FieldSchema(); + _elem1331.read(iprot); + struct.success.add(_elem1331); } iprot.readListEnd(); } @@ -58264,9 +58550,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1325 : struct.success) + for (FieldSchema _iter1333 : struct.success) { - _iter1325.write(oprot); + _iter1333.write(oprot); } oprot.writeListEnd(); } @@ -58321,9 +58607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1326 : struct.success) + for (FieldSchema _iter1334 : struct.success) { - _iter1326.write(oprot); + _iter1334.write(oprot); } } } @@ -58344,14 +58630,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1327.size); - FieldSchema _elem1328; - for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) + org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1335.size); + FieldSchema _elem1336; + for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) { - _elem1328 = new FieldSchema(); - _elem1328.read(iprot); - struct.success.add(_elem1328); + _elem1336 = new FieldSchema(); + _elem1336.read(iprot); + struct.success.add(_elem1336); } } struct.setSuccessIsSet(true); @@ -59505,14 +59791,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); - struct.success = new ArrayList(_list1330.size); - FieldSchema _elem1331; - for (int _i1332 = 0; _i1332 < _list1330.size; 
++_i1332) + org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); + struct.success = new ArrayList(_list1338.size); + FieldSchema _elem1339; + for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) { - _elem1331 = new FieldSchema(); - _elem1331.read(iprot); - struct.success.add(_elem1331); + _elem1339 = new FieldSchema(); + _elem1339.read(iprot); + struct.success.add(_elem1339); } iprot.readListEnd(); } @@ -59565,9 +59851,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1333 : struct.success) + for (FieldSchema _iter1341 : struct.success) { - _iter1333.write(oprot); + _iter1341.write(oprot); } oprot.writeListEnd(); } @@ -59622,9 +59908,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1334 : struct.success) + for (FieldSchema _iter1342 : struct.success) { - _iter1334.write(oprot); + _iter1342.write(oprot); } } } @@ -59645,14 +59931,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1335.size); - FieldSchema _elem1336; - for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) + org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1343.size); + FieldSchema _elem1344; + for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) { - _elem1336 = new FieldSchema(); - _elem1336.read(iprot); - 
struct.success.add(_elem1336); + _elem1344 = new FieldSchema(); + _elem1344.read(iprot); + struct.success.add(_elem1344); } } struct.setSuccessIsSet(true); @@ -63825,14 +64111,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1338.size); - SQLPrimaryKey _elem1339; - for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) + org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1346.size); + SQLPrimaryKey _elem1347; + for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348) { - _elem1339 = new SQLPrimaryKey(); - _elem1339.read(iprot); - struct.primaryKeys.add(_elem1339); + _elem1347 = new SQLPrimaryKey(); + _elem1347.read(iprot); + struct.primaryKeys.add(_elem1347); } iprot.readListEnd(); } @@ -63844,14 +64130,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1341 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1341.size); - SQLForeignKey _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1349 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1349.size); + SQLForeignKey _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1342 = new SQLForeignKey(); - _elem1342.read(iprot); - struct.foreignKeys.add(_elem1342); + _elem1350 = new SQLForeignKey(); + _elem1350.read(iprot); + struct.foreignKeys.add(_elem1350); } iprot.readListEnd(); } @@ -63863,14 +64149,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1344.size); - SQLUniqueConstraint _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1352.size); + SQLUniqueConstraint _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1345 = new SQLUniqueConstraint(); - _elem1345.read(iprot); - struct.uniqueConstraints.add(_elem1345); + _elem1353 = new SQLUniqueConstraint(); + _elem1353.read(iprot); + struct.uniqueConstraints.add(_elem1353); } iprot.readListEnd(); } @@ -63882,14 +64168,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1347 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1347.size); - SQLNotNullConstraint _elem1348; - for (int _i1349 = 0; _i1349 < _list1347.size; ++_i1349) + org.apache.thrift.protocol.TList _list1355 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1355.size); + SQLNotNullConstraint _elem1356; + for (int _i1357 = 0; _i1357 < _list1355.size; ++_i1357) { - _elem1348 = new SQLNotNullConstraint(); - _elem1348.read(iprot); - struct.notNullConstraints.add(_elem1348); + _elem1356 = new SQLNotNullConstraint(); + _elem1356.read(iprot); + struct.notNullConstraints.add(_elem1356); } iprot.readListEnd(); } @@ -63901,14 +64187,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1350 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1350.size); - SQLDefaultConstraint 
_elem1351; - for (int _i1352 = 0; _i1352 < _list1350.size; ++_i1352) + org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1358.size); + SQLDefaultConstraint _elem1359; + for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) { - _elem1351 = new SQLDefaultConstraint(); - _elem1351.read(iprot); - struct.defaultConstraints.add(_elem1351); + _elem1359 = new SQLDefaultConstraint(); + _elem1359.read(iprot); + struct.defaultConstraints.add(_elem1359); } iprot.readListEnd(); } @@ -63920,14 +64206,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1353 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1353.size); - SQLCheckConstraint _elem1354; - for (int _i1355 = 0; _i1355 < _list1353.size; ++_i1355) + org.apache.thrift.protocol.TList _list1361 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1361.size); + SQLCheckConstraint _elem1362; + for (int _i1363 = 0; _i1363 < _list1361.size; ++_i1363) { - _elem1354 = new SQLCheckConstraint(); - _elem1354.read(iprot); - struct.checkConstraints.add(_elem1354); + _elem1362 = new SQLCheckConstraint(); + _elem1362.read(iprot); + struct.checkConstraints.add(_elem1362); } iprot.readListEnd(); } @@ -63958,9 +64244,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1356 : struct.primaryKeys) + for (SQLPrimaryKey _iter1364 : struct.primaryKeys) { - _iter1356.write(oprot); + _iter1364.write(oprot); } oprot.writeListEnd(); } @@ -63970,9 +64256,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ 
oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1357 : struct.foreignKeys) + for (SQLForeignKey _iter1365 : struct.foreignKeys) { - _iter1357.write(oprot); + _iter1365.write(oprot); } oprot.writeListEnd(); } @@ -63982,9 +64268,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1358 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1366 : struct.uniqueConstraints) { - _iter1358.write(oprot); + _iter1366.write(oprot); } oprot.writeListEnd(); } @@ -63994,9 +64280,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1359 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1367 : struct.notNullConstraints) { - _iter1359.write(oprot); + _iter1367.write(oprot); } oprot.writeListEnd(); } @@ -64006,9 +64292,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1360 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1368 : struct.defaultConstraints) { - _iter1360.write(oprot); + _iter1368.write(oprot); } oprot.writeListEnd(); } @@ -64018,9 +64304,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ 
oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1361 : struct.checkConstraints) + for (SQLCheckConstraint _iter1369 : struct.checkConstraints) { - _iter1361.write(oprot); + _iter1369.write(oprot); } oprot.writeListEnd(); } @@ -64072,54 +64358,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1362 : struct.primaryKeys) + for (SQLPrimaryKey _iter1370 : struct.primaryKeys) { - _iter1362.write(oprot); + _iter1370.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1363 : struct.foreignKeys) + for (SQLForeignKey _iter1371 : struct.foreignKeys) { - _iter1363.write(oprot); + _iter1371.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1364 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1372 : struct.uniqueConstraints) { - _iter1364.write(oprot); + _iter1372.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1365 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1373 : struct.notNullConstraints) { - _iter1365.write(oprot); + _iter1373.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1366 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1374 : struct.defaultConstraints) { - _iter1366.write(oprot); + _iter1374.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1367 : struct.checkConstraints) + for 
(SQLCheckConstraint _iter1375 : struct.checkConstraints) { - _iter1367.write(oprot); + _iter1375.write(oprot); } } } @@ -64136,84 +64422,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1368 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1368.size); - SQLPrimaryKey _elem1369; - for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) + org.apache.thrift.protocol.TList _list1376 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1376.size); + SQLPrimaryKey _elem1377; + for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1369 = new SQLPrimaryKey(); - _elem1369.read(iprot); - struct.primaryKeys.add(_elem1369); + _elem1377 = new SQLPrimaryKey(); + _elem1377.read(iprot); + struct.primaryKeys.add(_elem1377); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1371.size); - SQLForeignKey _elem1372; - for (int _i1373 = 0; _i1373 < _list1371.size; ++_i1373) + org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1379.size); + SQLForeignKey _elem1380; + for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) { - _elem1372 = new SQLForeignKey(); - _elem1372.read(iprot); - struct.foreignKeys.add(_elem1372); + _elem1380 = new SQLForeignKey(); + _elem1380.read(iprot); + struct.foreignKeys.add(_elem1380); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1374 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1374.size); - SQLUniqueConstraint _elem1375; - for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) + org.apache.thrift.protocol.TList _list1382 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1382.size); + SQLUniqueConstraint _elem1383; + for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) { - _elem1375 = new SQLUniqueConstraint(); - _elem1375.read(iprot); - struct.uniqueConstraints.add(_elem1375); + _elem1383 = new SQLUniqueConstraint(); + _elem1383.read(iprot); + struct.uniqueConstraints.add(_elem1383); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1377 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1377.size); - SQLNotNullConstraint _elem1378; - for (int _i1379 = 0; _i1379 < _list1377.size; ++_i1379) + org.apache.thrift.protocol.TList _list1385 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1385.size); + SQLNotNullConstraint _elem1386; + for (int _i1387 = 0; _i1387 < _list1385.size; ++_i1387) { - _elem1378 = new SQLNotNullConstraint(); - _elem1378.read(iprot); - struct.notNullConstraints.add(_elem1378); + _elem1386 = new SQLNotNullConstraint(); + _elem1386.read(iprot); + struct.notNullConstraints.add(_elem1386); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1380 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1380.size); - SQLDefaultConstraint _elem1381; - for (int _i1382 = 0; _i1382 < 
_list1380.size; ++_i1382) + org.apache.thrift.protocol.TList _list1388 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1388.size); + SQLDefaultConstraint _elem1389; + for (int _i1390 = 0; _i1390 < _list1388.size; ++_i1390) { - _elem1381 = new SQLDefaultConstraint(); - _elem1381.read(iprot); - struct.defaultConstraints.add(_elem1381); + _elem1389 = new SQLDefaultConstraint(); + _elem1389.read(iprot); + struct.defaultConstraints.add(_elem1389); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1383.size); - SQLCheckConstraint _elem1384; - for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) + org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1391.size); + SQLCheckConstraint _elem1392; + for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) { - _elem1384 = new SQLCheckConstraint(); - _elem1384.read(iprot); - struct.checkConstraints.add(_elem1384); + _elem1392 = new SQLCheckConstraint(); + _elem1392.read(iprot); + struct.checkConstraints.add(_elem1392); } } struct.setCheckConstraintsIsSet(true); @@ -74404,13 +74690,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1386.size); - String _elem1387; - for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) + org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1394.size); + String _elem1395; 
+ for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) { - _elem1387 = iprot.readString(); - struct.partNames.add(_elem1387); + _elem1395 = iprot.readString(); + struct.partNames.add(_elem1395); } iprot.readListEnd(); } @@ -74446,9 +74732,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1389 : struct.partNames) + for (String _iter1397 : struct.partNames) { - oprot.writeString(_iter1389); + oprot.writeString(_iter1397); } oprot.writeListEnd(); } @@ -74491,9 +74777,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1390 : struct.partNames) + for (String _iter1398 : struct.partNames) { - oprot.writeString(_iter1390); + oprot.writeString(_iter1398); } } } @@ -74513,13 +74799,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1391.size); - String _elem1392; - for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) + org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1399.size); + String _elem1400; + for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) { - _elem1392 = iprot.readString(); - struct.partNames.add(_elem1392); + _elem1400 = iprot.readString(); + struct.partNames.add(_elem1400); } } struct.setPartNamesIsSet(true); @@ -76576,13 +76862,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // 
SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); - struct.success = new ArrayList(_list1394.size); - String _elem1395; - for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) + org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); + struct.success = new ArrayList(_list1402.size); + String _elem1403; + for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) { - _elem1395 = iprot.readString(); - struct.success.add(_elem1395); + _elem1403 = iprot.readString(); + struct.success.add(_elem1403); } iprot.readListEnd(); } @@ -76617,9 +76903,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1397 : struct.success) + for (String _iter1405 : struct.success) { - oprot.writeString(_iter1397); + oprot.writeString(_iter1405); } oprot.writeListEnd(); } @@ -76658,9 +76944,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1398 : struct.success) + for (String _iter1406 : struct.success) { - oprot.writeString(_iter1398); + oprot.writeString(_iter1406); } } } @@ -76675,13 +76961,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1399.size); - String _elem1400; - for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) + org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.success = new ArrayList(_list1407.size); + String _elem1408; + for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) { - _elem1400 = iprot.readString(); - struct.success.add(_elem1400); + _elem1408 = iprot.readString(); + struct.success.add(_elem1408); } } struct.setSuccessIsSet(true); @@ -77655,13 +77941,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); - struct.success = new ArrayList(_list1402.size); - String _elem1403; - for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) + org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); + struct.success = new ArrayList(_list1410.size); + String _elem1411; + for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) { - _elem1403 = iprot.readString(); - struct.success.add(_elem1403); + _elem1411 = iprot.readString(); + struct.success.add(_elem1411); } iprot.readListEnd(); } @@ -77696,9 +77982,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1405 : struct.success) + for (String _iter1413 : struct.success) { - oprot.writeString(_iter1405); + oprot.writeString(_iter1413); } oprot.writeListEnd(); } @@ -77737,9 +78023,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1406 : struct.success) + for (String _iter1414 : struct.success) { - oprot.writeString(_iter1406); + oprot.writeString(_iter1414); } } } @@ -77754,13 +78040,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1407.size); - String _elem1408; - for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) + org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1415.size); + String _elem1416; + for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) { - _elem1408 = iprot.readString(); - struct.success.add(_elem1408); + _elem1416 = iprot.readString(); + struct.success.add(_elem1416); } } struct.setSuccessIsSet(true); @@ -78417,14 +78703,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_materialize case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); - struct.success = new ArrayList(_list1410.size); - Table _elem1411; - for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) + org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1418.size); + Table _elem1419; + for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) { - _elem1411 = new Table(); - _elem1411.read(iprot); - struct.success.add(_elem1411); + _elem1419 = new Table(); + _elem1419.read(iprot); + struct.success.add(_elem1419); } iprot.readListEnd(); } @@ -78459,9 +78745,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_materializ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1413 : struct.success) + for (Table _iter1421 : struct.success) { - _iter1413.write(oprot); + _iter1421.write(oprot); } oprot.writeListEnd(); } @@ -78500,9 +78786,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_materialize if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1414 : struct.success) + for (Table _iter1422 : struct.success) { - _iter1414.write(oprot); + _iter1422.write(oprot); } } } @@ -78517,14 +78803,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_materialized BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1415.size); - Table _elem1416; - for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) + org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1423.size); + Table _elem1424; + for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) { - _elem1416 = new Table(); - _elem1416.read(iprot); - struct.success.add(_elem1416); + _elem1424 = new Table(); + _elem1424.read(iprot); + struct.success.add(_elem1424); } } struct.setSuccessIsSet(true); @@ -79290,13 +79576,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); - struct.success = new ArrayList(_list1418.size); - String _elem1419; - for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) + org.apache.thrift.protocol.TList _list1426 = iprot.readListBegin(); + struct.success = new ArrayList(_list1426.size); + String _elem1427; + for (int _i1428 = 0; _i1428 < _list1426.size; ++_i1428) { - _elem1419 = iprot.readString(); - struct.success.add(_elem1419); + _elem1427 = iprot.readString(); + struct.success.add(_elem1427); } iprot.readListEnd(); } @@ -79331,9 +79617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1421 : struct.success) + for (String _iter1429 : struct.success) { - oprot.writeString(_iter1421); + oprot.writeString(_iter1429); } oprot.writeListEnd(); } @@ -79372,9 +79658,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1422 : struct.success) + for (String _iter1430 : struct.success) { - oprot.writeString(_iter1422); + oprot.writeString(_iter1430); } } } @@ -79389,13 +79675,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) 
{ { - org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1423.size); - String _elem1424; - for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) + org.apache.thrift.protocol.TList _list1431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1431.size); + String _elem1432; + for (int _i1433 = 0; _i1433 < _list1431.size; ++_i1433) { - _elem1424 = iprot.readString(); - struct.success.add(_elem1424); + _elem1432 = iprot.readString(); + struct.success.add(_elem1432); } } struct.setSuccessIsSet(true); @@ -79900,13 +80186,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1426 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1426.size); - String _elem1427; - for (int _i1428 = 0; _i1428 < _list1426.size; ++_i1428) + org.apache.thrift.protocol.TList _list1434 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1434.size); + String _elem1435; + for (int _i1436 = 0; _i1436 < _list1434.size; ++_i1436) { - _elem1427 = iprot.readString(); - struct.tbl_types.add(_elem1427); + _elem1435 = iprot.readString(); + struct.tbl_types.add(_elem1435); } iprot.readListEnd(); } @@ -79942,9 +80228,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1429 : struct.tbl_types) + for (String _iter1437 : struct.tbl_types) { - oprot.writeString(_iter1429); + oprot.writeString(_iter1437); } oprot.writeListEnd(); } @@ -79987,9 +80273,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1430 : struct.tbl_types) + for (String _iter1438 : struct.tbl_types) { - oprot.writeString(_iter1430); + oprot.writeString(_iter1438); } } } @@ -80009,13 +80295,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1431.size); - String _elem1432; - for (int _i1433 = 0; _i1433 < _list1431.size; ++_i1433) + org.apache.thrift.protocol.TList _list1439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1439.size); + String _elem1440; + for (int _i1441 = 0; _i1441 < _list1439.size; ++_i1441) { - _elem1432 = iprot.readString(); - struct.tbl_types.add(_elem1432); + _elem1440 = iprot.readString(); + struct.tbl_types.add(_elem1440); } } struct.setTbl_typesIsSet(true); @@ -80421,14 +80707,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1434 = iprot.readListBegin(); - struct.success = new ArrayList(_list1434.size); - TableMeta _elem1435; - for (int _i1436 = 0; _i1436 < _list1434.size; ++_i1436) + org.apache.thrift.protocol.TList _list1442 = iprot.readListBegin(); + struct.success = new ArrayList(_list1442.size); + TableMeta _elem1443; + for (int _i1444 = 0; _i1444 < _list1442.size; ++_i1444) { - _elem1435 = new TableMeta(); - _elem1435.read(iprot); - struct.success.add(_elem1435); + _elem1443 = new TableMeta(); + _elem1443.read(iprot); + struct.success.add(_elem1443); } iprot.readListEnd(); } @@ -80463,9 +80749,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1437 : struct.success) + for (TableMeta _iter1445 : struct.success) { - _iter1437.write(oprot); + _iter1445.write(oprot); } oprot.writeListEnd(); } @@ -80504,9 +80790,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1438 : struct.success) + for (TableMeta _iter1446 : struct.success) { - _iter1438.write(oprot); + _iter1446.write(oprot); } } } @@ -80521,14 +80807,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1439.size); - TableMeta _elem1440; - for (int _i1441 = 0; _i1441 < _list1439.size; ++_i1441) + org.apache.thrift.protocol.TList _list1447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1447.size); + TableMeta _elem1448; + for (int _i1449 = 0; _i1449 < _list1447.size; ++_i1449) { - _elem1440 = new TableMeta(); - _elem1440.read(iprot); - struct.success.add(_elem1440); + _elem1448 = new TableMeta(); + _elem1448.read(iprot); + struct.success.add(_elem1448); } } struct.setSuccessIsSet(true); @@ -81294,13 +81580,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1442 = iprot.readListBegin(); - struct.success = new ArrayList(_list1442.size); - String _elem1443; 
- for (int _i1444 = 0; _i1444 < _list1442.size; ++_i1444) + org.apache.thrift.protocol.TList _list1450 = iprot.readListBegin(); + struct.success = new ArrayList(_list1450.size); + String _elem1451; + for (int _i1452 = 0; _i1452 < _list1450.size; ++_i1452) { - _elem1443 = iprot.readString(); - struct.success.add(_elem1443); + _elem1451 = iprot.readString(); + struct.success.add(_elem1451); } iprot.readListEnd(); } @@ -81335,9 +81621,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1445 : struct.success) + for (String _iter1453 : struct.success) { - oprot.writeString(_iter1445); + oprot.writeString(_iter1453); } oprot.writeListEnd(); } @@ -81376,9 +81662,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1446 : struct.success) + for (String _iter1454 : struct.success) { - oprot.writeString(_iter1446); + oprot.writeString(_iter1454); } } } @@ -81393,13 +81679,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1447.size); - String _elem1448; - for (int _i1449 = 0; _i1449 < _list1447.size; ++_i1449) + org.apache.thrift.protocol.TList _list1455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1455.size); + String _elem1456; + for (int _i1457 = 0; _i1457 < _list1455.size; ++_i1457) { - _elem1448 = iprot.readString(); - struct.success.add(_elem1448); + _elem1456 = 
iprot.readString(); + struct.success.add(_elem1456); } } struct.setSuccessIsSet(true); @@ -82852,13 +83138,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1450 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1450.size); - String _elem1451; - for (int _i1452 = 0; _i1452 < _list1450.size; ++_i1452) + org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1458.size); + String _elem1459; + for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460) { - _elem1451 = iprot.readString(); - struct.tbl_names.add(_elem1451); + _elem1459 = iprot.readString(); + struct.tbl_names.add(_elem1459); } iprot.readListEnd(); } @@ -82889,9 +83175,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1453 : struct.tbl_names) + for (String _iter1461 : struct.tbl_names) { - oprot.writeString(_iter1453); + oprot.writeString(_iter1461); } oprot.writeListEnd(); } @@ -82928,9 +83214,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1454 : struct.tbl_names) + for (String _iter1462 : struct.tbl_names) { - oprot.writeString(_iter1454); + oprot.writeString(_iter1462); } } } @@ -82946,13 +83232,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1455.size); - String _elem1456; - for 
(int _i1457 = 0; _i1457 < _list1455.size; ++_i1457) + org.apache.thrift.protocol.TList _list1463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1463.size); + String _elem1464; + for (int _i1465 = 0; _i1465 < _list1463.size; ++_i1465) { - _elem1456 = iprot.readString(); - struct.tbl_names.add(_elem1456); + _elem1464 = iprot.readString(); + struct.tbl_names.add(_elem1464); } } struct.setTbl_namesIsSet(true); @@ -83277,14 +83563,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin(); - struct.success = new ArrayList
(_list1458.size); - Table _elem1459; - for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460) + org.apache.thrift.protocol.TList _list1466 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1466.size); + Table _elem1467; + for (int _i1468 = 0; _i1468 < _list1466.size; ++_i1468) { - _elem1459 = new Table(); - _elem1459.read(iprot); - struct.success.add(_elem1459); + _elem1467 = new Table(); + _elem1467.read(iprot); + struct.success.add(_elem1467); } iprot.readListEnd(); } @@ -83310,9 +83596,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1461 : struct.success) + for (Table _iter1469 : struct.success) { - _iter1461.write(oprot); + _iter1469.write(oprot); } oprot.writeListEnd(); } @@ -83343,9 +83629,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1462 : struct.success) + for (Table _iter1470 : struct.success) { - _iter1462.write(oprot); + _iter1470.write(oprot); } } } @@ -83357,14 +83643,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1463.size); - Table _elem1464; - for (int _i1465 = 0; _i1465 < _list1463.size; ++_i1465) + org.apache.thrift.protocol.TList _list1471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1471.size); + Table _elem1472; + for (int _i1473 = 0; _i1473 < _list1471.size; ++_i1473) { - _elem1464 = new Table(); - _elem1464.read(iprot); - struct.success.add(_elem1464); + _elem1472 = new Table(); + _elem1472.read(iprot); + struct.success.add(_elem1472); } } struct.setSuccessIsSet(true); @@ -84133,14 +84419,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_ext_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1466 = iprot.readListBegin(); - struct.success = new ArrayList(_list1466.size); - ExtendedTableInfo _elem1467; - for (int _i1468 = 0; _i1468 < _list1466.size; ++_i1468) + org.apache.thrift.protocol.TList _list1474 = iprot.readListBegin(); + struct.success = new ArrayList(_list1474.size); + ExtendedTableInfo _elem1475; + for (int _i1476 = 0; _i1476 < _list1474.size; ++_i1476) { - _elem1467 = new ExtendedTableInfo(); - _elem1467.read(iprot); - struct.success.add(_elem1467); + _elem1475 = new ExtendedTableInfo(); + _elem1475.read(iprot); + struct.success.add(_elem1475); } iprot.readListEnd(); } @@ -84175,9 +84461,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_ext_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (ExtendedTableInfo _iter1469 : struct.success) + for (ExtendedTableInfo _iter1477 : struct.success) { - _iter1469.write(oprot); + _iter1477.write(oprot); } oprot.writeListEnd(); } @@ -84216,9 +84502,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (ExtendedTableInfo _iter1470 : struct.success) + for (ExtendedTableInfo _iter1478 : struct.success) { - _iter1470.write(oprot); + _iter1478.write(oprot); } } } @@ -84233,14 +84519,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1471.size); - ExtendedTableInfo _elem1472; - for (int _i1473 = 0; _i1473 < _list1471.size; ++_i1473) + org.apache.thrift.protocol.TList _list1479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1479.size); + ExtendedTableInfo _elem1480; + for (int _i1481 = 0; _i1481 < _list1479.size; ++_i1481) { - _elem1472 = new ExtendedTableInfo(); - _elem1472.read(iprot); - struct.success.add(_elem1472); + _elem1480 = new ExtendedTableInfo(); + _elem1480.read(iprot); + struct.success.add(_elem1480); } } struct.setSuccessIsSet(true); @@ -89753,13 +90039,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1474 = iprot.readListBegin(); - struct.success = new ArrayList(_list1474.size); - String _elem1475; - for (int _i1476 = 0; _i1476 < _list1474.size; ++_i1476) + org.apache.thrift.protocol.TList _list1482 = iprot.readListBegin(); + struct.success = new ArrayList(_list1482.size); + String _elem1483; + for (int _i1484 = 0; _i1484 < _list1482.size; ++_i1484) { - _elem1475 = iprot.readString(); - struct.success.add(_elem1475); + _elem1483 = iprot.readString(); + struct.success.add(_elem1483); } iprot.readListEnd(); } @@ -89812,9 +90098,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1477 : struct.success) + for 
(String _iter1485 : struct.success) { - oprot.writeString(_iter1477); + oprot.writeString(_iter1485); } oprot.writeListEnd(); } @@ -89869,9 +90155,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1478 : struct.success) + for (String _iter1486 : struct.success) { - oprot.writeString(_iter1478); + oprot.writeString(_iter1486); } } } @@ -89892,13 +90178,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1479.size); - String _elem1480; - for (int _i1481 = 0; _i1481 < _list1479.size; ++_i1481) + org.apache.thrift.protocol.TList _list1487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1487.size); + String _elem1488; + for (int _i1489 = 0; _i1489 < _list1487.size; ++_i1489) { - _elem1480 = iprot.readString(); - struct.success.add(_elem1480); + _elem1488 = iprot.readString(); + struct.success.add(_elem1488); } } struct.setSuccessIsSet(true); @@ -96695,14 +96981,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1482 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1482.size); - Partition _elem1483; - for (int _i1484 = 0; _i1484 < _list1482.size; ++_i1484) + org.apache.thrift.protocol.TList _list1490 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1490.size); + Partition _elem1491; + for (int _i1492 = 0; _i1492 < _list1490.size; ++_i1492) { - _elem1483 = new Partition(); - 
_elem1483.read(iprot); - struct.new_parts.add(_elem1483); + _elem1491 = new Partition(); + _elem1491.read(iprot); + struct.new_parts.add(_elem1491); } iprot.readListEnd(); } @@ -96728,9 +97014,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1485 : struct.new_parts) + for (Partition _iter1493 : struct.new_parts) { - _iter1485.write(oprot); + _iter1493.write(oprot); } oprot.writeListEnd(); } @@ -96761,9 +97047,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1486 : struct.new_parts) + for (Partition _iter1494 : struct.new_parts) { - _iter1486.write(oprot); + _iter1494.write(oprot); } } } @@ -96775,14 +97061,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1487.size); - Partition _elem1488; - for (int _i1489 = 0; _i1489 < _list1487.size; ++_i1489) + org.apache.thrift.protocol.TList _list1495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1495.size); + Partition _elem1496; + for (int _i1497 = 0; _i1497 < _list1495.size; ++_i1497) { - _elem1488 = new Partition(); - _elem1488.read(iprot); - struct.new_parts.add(_elem1488); + _elem1496 = new Partition(); + _elem1496.read(iprot); + struct.new_parts.add(_elem1496); } } struct.setNew_partsIsSet(true); @@ -97783,14 +98069,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1490 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1490.size); - PartitionSpec _elem1491; - for (int _i1492 = 0; _i1492 < _list1490.size; ++_i1492) + org.apache.thrift.protocol.TList _list1498 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1498.size); + PartitionSpec _elem1499; + for (int _i1500 = 0; _i1500 < _list1498.size; ++_i1500) { - _elem1491 = new PartitionSpec(); - _elem1491.read(iprot); - struct.new_parts.add(_elem1491); + _elem1499 = new PartitionSpec(); + _elem1499.read(iprot); + struct.new_parts.add(_elem1499); } iprot.readListEnd(); } @@ -97816,9 +98102,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1493 : struct.new_parts) + for (PartitionSpec _iter1501 : struct.new_parts) { - _iter1493.write(oprot); + _iter1501.write(oprot); } oprot.writeListEnd(); } @@ -97849,9 +98135,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1494 : struct.new_parts) + for (PartitionSpec _iter1502 : struct.new_parts) { - _iter1494.write(oprot); + _iter1502.write(oprot); } } } @@ -97863,14 +98149,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1495.size); - PartitionSpec _elem1496; - for (int _i1497 = 0; _i1497 < _list1495.size; ++_i1497) + 
org.apache.thrift.protocol.TList _list1503 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1503.size); + PartitionSpec _elem1504; + for (int _i1505 = 0; _i1505 < _list1503.size; ++_i1505) { - _elem1496 = new PartitionSpec(); - _elem1496.read(iprot); - struct.new_parts.add(_elem1496); + _elem1504 = new PartitionSpec(); + _elem1504.read(iprot); + struct.new_parts.add(_elem1504); } } struct.setNew_partsIsSet(true); @@ -99046,13 +99332,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1498 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1498.size); - String _elem1499; - for (int _i1500 = 0; _i1500 < _list1498.size; ++_i1500) + org.apache.thrift.protocol.TList _list1506 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1506.size); + String _elem1507; + for (int _i1508 = 0; _i1508 < _list1506.size; ++_i1508) { - _elem1499 = iprot.readString(); - struct.part_vals.add(_elem1499); + _elem1507 = iprot.readString(); + struct.part_vals.add(_elem1507); } iprot.readListEnd(); } @@ -99088,9 +99374,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1501 : struct.part_vals) + for (String _iter1509 : struct.part_vals) { - oprot.writeString(_iter1501); + oprot.writeString(_iter1509); } oprot.writeListEnd(); } @@ -99133,9 +99419,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1502 : struct.part_vals) + for (String _iter1510 : struct.part_vals) { - 
oprot.writeString(_iter1502); + oprot.writeString(_iter1510); } } } @@ -99155,13 +99441,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1503 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1503.size); - String _elem1504; - for (int _i1505 = 0; _i1505 < _list1503.size; ++_i1505) + org.apache.thrift.protocol.TList _list1511 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1511.size); + String _elem1512; + for (int _i1513 = 0; _i1513 < _list1511.size; ++_i1513) { - _elem1504 = iprot.readString(); - struct.part_vals.add(_elem1504); + _elem1512 = iprot.readString(); + struct.part_vals.add(_elem1512); } } struct.setPart_valsIsSet(true); @@ -101470,13 +101756,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1506 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1506.size); - String _elem1507; - for (int _i1508 = 0; _i1508 < _list1506.size; ++_i1508) + org.apache.thrift.protocol.TList _list1514 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1514.size); + String _elem1515; + for (int _i1516 = 0; _i1516 < _list1514.size; ++_i1516) { - _elem1507 = iprot.readString(); - struct.part_vals.add(_elem1507); + _elem1515 = iprot.readString(); + struct.part_vals.add(_elem1515); } iprot.readListEnd(); } @@ -101521,9 +101807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1509 : 
struct.part_vals) + for (String _iter1517 : struct.part_vals) { - oprot.writeString(_iter1509); + oprot.writeString(_iter1517); } oprot.writeListEnd(); } @@ -101574,9 +101860,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1510 : struct.part_vals) + for (String _iter1518 : struct.part_vals) { - oprot.writeString(_iter1510); + oprot.writeString(_iter1518); } } } @@ -101599,13 +101885,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1511 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1511.size); - String _elem1512; - for (int _i1513 = 0; _i1513 < _list1511.size; ++_i1513) + org.apache.thrift.protocol.TList _list1519 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1519.size); + String _elem1520; + for (int _i1521 = 0; _i1521 < _list1519.size; ++_i1521) { - _elem1512 = iprot.readString(); - struct.part_vals.add(_elem1512); + _elem1520 = iprot.readString(); + struct.part_vals.add(_elem1520); } } struct.setPart_valsIsSet(true); @@ -105475,13 +105761,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1514 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1514.size); - String _elem1515; - for (int _i1516 = 0; _i1516 < _list1514.size; ++_i1516) + org.apache.thrift.protocol.TList _list1522 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1522.size); + String _elem1523; + for (int _i1524 = 0; _i1524 < _list1522.size; ++_i1524) { - _elem1515 = iprot.readString(); - 
struct.part_vals.add(_elem1515); + _elem1523 = iprot.readString(); + struct.part_vals.add(_elem1523); } iprot.readListEnd(); } @@ -105525,9 +105811,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1517 : struct.part_vals) + for (String _iter1525 : struct.part_vals) { - oprot.writeString(_iter1517); + oprot.writeString(_iter1525); } oprot.writeListEnd(); } @@ -105576,9 +105862,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1518 : struct.part_vals) + for (String _iter1526 : struct.part_vals) { - oprot.writeString(_iter1518); + oprot.writeString(_iter1526); } } } @@ -105601,13 +105887,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1519 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1519.size); - String _elem1520; - for (int _i1521 = 0; _i1521 < _list1519.size; ++_i1521) + org.apache.thrift.protocol.TList _list1527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1527.size); + String _elem1528; + for (int _i1529 = 0; _i1529 < _list1527.size; ++_i1529) { - _elem1520 = iprot.readString(); - struct.part_vals.add(_elem1520); + _elem1528 = iprot.readString(); + struct.part_vals.add(_elem1528); } } struct.setPart_valsIsSet(true); @@ -106846,13 +107132,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1522 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1522.size); - String _elem1523; - for (int _i1524 = 0; _i1524 < _list1522.size; ++_i1524) + org.apache.thrift.protocol.TList _list1530 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1530.size); + String _elem1531; + for (int _i1532 = 0; _i1532 < _list1530.size; ++_i1532) { - _elem1523 = iprot.readString(); - struct.part_vals.add(_elem1523); + _elem1531 = iprot.readString(); + struct.part_vals.add(_elem1531); } iprot.readListEnd(); } @@ -106905,9 +107191,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1525 : struct.part_vals) + for (String _iter1533 : struct.part_vals) { - oprot.writeString(_iter1525); + oprot.writeString(_iter1533); } oprot.writeListEnd(); } @@ -106964,9 +107250,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1526 : struct.part_vals) + for (String _iter1534 : struct.part_vals) { - oprot.writeString(_iter1526); + oprot.writeString(_iter1534); } } } @@ -106992,13 +107278,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1527.size); - String _elem1528; - for (int _i1529 = 0; _i1529 < _list1527.size; ++_i1529) + org.apache.thrift.protocol.TList _list1535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1535.size); + String _elem1536; + for (int 
_i1537 = 0; _i1537 < _list1535.size; ++_i1537) { - _elem1528 = iprot.readString(); - struct.part_vals.add(_elem1528); + _elem1536 = iprot.readString(); + struct.part_vals.add(_elem1536); } } struct.setPart_valsIsSet(true); @@ -111600,13 +111886,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1530 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1530.size); - String _elem1531; - for (int _i1532 = 0; _i1532 < _list1530.size; ++_i1532) + org.apache.thrift.protocol.TList _list1538 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1538.size); + String _elem1539; + for (int _i1540 = 0; _i1540 < _list1538.size; ++_i1540) { - _elem1531 = iprot.readString(); - struct.part_vals.add(_elem1531); + _elem1539 = iprot.readString(); + struct.part_vals.add(_elem1539); } iprot.readListEnd(); } @@ -111642,9 +111928,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1533 : struct.part_vals) + for (String _iter1541 : struct.part_vals) { - oprot.writeString(_iter1533); + oprot.writeString(_iter1541); } oprot.writeListEnd(); } @@ -111687,9 +111973,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1534 : struct.part_vals) + for (String _iter1542 : struct.part_vals) { - oprot.writeString(_iter1534); + oprot.writeString(_iter1542); } } } @@ -111709,13 +111995,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1535 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1535.size); - String _elem1536; - for (int _i1537 = 0; _i1537 < _list1535.size; ++_i1537) + org.apache.thrift.protocol.TList _list1543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1543.size); + String _elem1544; + for (int _i1545 = 0; _i1545 < _list1543.size; ++_i1545) { - _elem1536 = iprot.readString(); - struct.part_vals.add(_elem1536); + _elem1544 = iprot.readString(); + struct.part_vals.add(_elem1544); } } struct.setPart_valsIsSet(true); @@ -113871,15 +114157,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1538 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1538.size); - String _key1539; - String _val1540; - for (int _i1541 = 0; _i1541 < _map1538.size; ++_i1541) + org.apache.thrift.protocol.TMap _map1546 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1546.size); + String _key1547; + String _val1548; + for (int _i1549 = 0; _i1549 < _map1546.size; ++_i1549) { - _key1539 = iprot.readString(); - _val1540 = iprot.readString(); - struct.partitionSpecs.put(_key1539, _val1540); + _key1547 = iprot.readString(); + _val1548 = iprot.readString(); + struct.partitionSpecs.put(_key1547, _val1548); } iprot.readMapEnd(); } @@ -113937,10 +114223,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1542 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1550 : 
struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1542.getKey()); - oprot.writeString(_iter1542.getValue()); + oprot.writeString(_iter1550.getKey()); + oprot.writeString(_iter1550.getValue()); } oprot.writeMapEnd(); } @@ -114003,10 +114289,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1543 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1551 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1543.getKey()); - oprot.writeString(_iter1543.getValue()); + oprot.writeString(_iter1551.getKey()); + oprot.writeString(_iter1551.getValue()); } } } @@ -114030,15 +114316,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1544 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1544.size); - String _key1545; - String _val1546; - for (int _i1547 = 0; _i1547 < _map1544.size; ++_i1547) + org.apache.thrift.protocol.TMap _map1552 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1552.size); + String _key1553; + String _val1554; + for (int _i1555 = 0; _i1555 < _map1552.size; ++_i1555) { - _key1545 = iprot.readString(); - _val1546 = iprot.readString(); - struct.partitionSpecs.put(_key1545, _val1546); + _key1553 = iprot.readString(); + _val1554 = iprot.readString(); + struct.partitionSpecs.put(_key1553, _val1554); } } struct.setPartitionSpecsIsSet(true); @@ -115484,15 +115770,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == 
org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1548 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1548.size); - String _key1549; - String _val1550; - for (int _i1551 = 0; _i1551 < _map1548.size; ++_i1551) + org.apache.thrift.protocol.TMap _map1556 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1556.size); + String _key1557; + String _val1558; + for (int _i1559 = 0; _i1559 < _map1556.size; ++_i1559) { - _key1549 = iprot.readString(); - _val1550 = iprot.readString(); - struct.partitionSpecs.put(_key1549, _val1550); + _key1557 = iprot.readString(); + _val1558 = iprot.readString(); + struct.partitionSpecs.put(_key1557, _val1558); } iprot.readMapEnd(); } @@ -115550,10 +115836,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1552 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1560 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1552.getKey()); - oprot.writeString(_iter1552.getValue()); + oprot.writeString(_iter1560.getKey()); + oprot.writeString(_iter1560.getValue()); } oprot.writeMapEnd(); } @@ -115616,10 +115902,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1553 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1561 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1553.getKey()); - oprot.writeString(_iter1553.getValue()); + oprot.writeString(_iter1561.getKey()); + oprot.writeString(_iter1561.getValue()); } } } @@ -115643,15 +115929,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet 
incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1554 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1554.size); - String _key1555; - String _val1556; - for (int _i1557 = 0; _i1557 < _map1554.size; ++_i1557) + org.apache.thrift.protocol.TMap _map1562 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1562.size); + String _key1563; + String _val1564; + for (int _i1565 = 0; _i1565 < _map1562.size; ++_i1565) { - _key1555 = iprot.readString(); - _val1556 = iprot.readString(); - struct.partitionSpecs.put(_key1555, _val1556); + _key1563 = iprot.readString(); + _val1564 = iprot.readString(); + struct.partitionSpecs.put(_key1563, _val1564); } } struct.setPartitionSpecsIsSet(true); @@ -116316,14 +116602,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin(); - struct.success = new ArrayList(_list1558.size); - Partition _elem1559; - for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560) + org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin(); + struct.success = new ArrayList(_list1566.size); + Partition _elem1567; + for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568) { - _elem1559 = new Partition(); - _elem1559.read(iprot); - struct.success.add(_elem1559); + _elem1567 = new Partition(); + _elem1567.read(iprot); + struct.success.add(_elem1567); } iprot.readListEnd(); } @@ -116385,9 +116671,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1561 : struct.success) + for (Partition _iter1569 : struct.success) { - _iter1561.write(oprot); + _iter1569.write(oprot); } oprot.writeListEnd(); } @@ -116450,9 +116736,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1562 : struct.success) + for (Partition _iter1570 : struct.success) { - _iter1562.write(oprot); + _iter1570.write(oprot); } } } @@ -116476,14 +116762,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1563.size); - Partition _elem1564; - for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565) + org.apache.thrift.protocol.TList _list1571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1571.size); + Partition _elem1572; + for (int _i1573 = 0; _i1573 < _list1571.size; ++_i1573) { - _elem1564 = new Partition(); - _elem1564.read(iprot); - struct.success.add(_elem1564); + _elem1572 = new Partition(); + _elem1572.read(iprot); + struct.success.add(_elem1572); } } struct.setSuccessIsSet(true); @@ -117182,13 +117468,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1566.size); - String _elem1567; - for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568) + org.apache.thrift.protocol.TList _list1574 = iprot.readListBegin(); + 
struct.part_vals = new ArrayList(_list1574.size); + String _elem1575; + for (int _i1576 = 0; _i1576 < _list1574.size; ++_i1576) { - _elem1567 = iprot.readString(); - struct.part_vals.add(_elem1567); + _elem1575 = iprot.readString(); + struct.part_vals.add(_elem1575); } iprot.readListEnd(); } @@ -117208,13 +117494,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1569 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1569.size); - String _elem1570; - for (int _i1571 = 0; _i1571 < _list1569.size; ++_i1571) + org.apache.thrift.protocol.TList _list1577 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1577.size); + String _elem1578; + for (int _i1579 = 0; _i1579 < _list1577.size; ++_i1579) { - _elem1570 = iprot.readString(); - struct.group_names.add(_elem1570); + _elem1578 = iprot.readString(); + struct.group_names.add(_elem1578); } iprot.readListEnd(); } @@ -117250,9 +117536,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1572 : struct.part_vals) + for (String _iter1580 : struct.part_vals) { - oprot.writeString(_iter1572); + oprot.writeString(_iter1580); } oprot.writeListEnd(); } @@ -117267,9 +117553,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1573 : struct.group_names) + for (String _iter1581 : struct.group_names) { - oprot.writeString(_iter1573); + oprot.writeString(_iter1581); } oprot.writeListEnd(); } @@ 
-117318,9 +117604,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1574 : struct.part_vals) + for (String _iter1582 : struct.part_vals) { - oprot.writeString(_iter1574); + oprot.writeString(_iter1582); } } } @@ -117330,9 +117616,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1575 : struct.group_names) + for (String _iter1583 : struct.group_names) { - oprot.writeString(_iter1575); + oprot.writeString(_iter1583); } } } @@ -117352,13 +117638,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1576 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1576.size); - String _elem1577; - for (int _i1578 = 0; _i1578 < _list1576.size; ++_i1578) + org.apache.thrift.protocol.TList _list1584 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1584.size); + String _elem1585; + for (int _i1586 = 0; _i1586 < _list1584.size; ++_i1586) { - _elem1577 = iprot.readString(); - struct.part_vals.add(_elem1577); + _elem1585 = iprot.readString(); + struct.part_vals.add(_elem1585); } } struct.setPart_valsIsSet(true); @@ -117369,13 +117655,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1579.size); - String _elem1580; - for (int _i1581 = 0; _i1581 < _list1579.size; ++_i1581) + org.apache.thrift.protocol.TList 
_list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1587.size); + String _elem1588; + for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589) { - _elem1580 = iprot.readString(); - struct.group_names.add(_elem1580); + _elem1588 = iprot.readString(); + struct.group_names.add(_elem1588); } } struct.setGroup_namesIsSet(true); @@ -120144,14 +120430,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1582 = iprot.readListBegin(); - struct.success = new ArrayList(_list1582.size); - Partition _elem1583; - for (int _i1584 = 0; _i1584 < _list1582.size; ++_i1584) + org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin(); + struct.success = new ArrayList(_list1590.size); + Partition _elem1591; + for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592) { - _elem1583 = new Partition(); - _elem1583.read(iprot); - struct.success.add(_elem1583); + _elem1591 = new Partition(); + _elem1591.read(iprot); + struct.success.add(_elem1591); } iprot.readListEnd(); } @@ -120195,9 +120481,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1585 : struct.success) + for (Partition _iter1593 : struct.success) { - _iter1585.write(oprot); + _iter1593.write(oprot); } oprot.writeListEnd(); } @@ -120244,9 +120530,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1586 : struct.success) + for (Partition _iter1594 : struct.success) { - _iter1586.write(oprot); + _iter1594.write(oprot); } } } @@ 
-120264,14 +120550,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1587.size); - Partition _elem1588; - for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589) + org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1595.size); + Partition _elem1596; + for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597) { - _elem1588 = new Partition(); - _elem1588.read(iprot); - struct.success.add(_elem1588); + _elem1596 = new Partition(); + _elem1596.read(iprot); + struct.success.add(_elem1596); } } struct.setSuccessIsSet(true); @@ -121899,13 +122185,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1590.size); - String _elem1591; - for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592) + org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1598.size); + String _elem1599; + for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) { - _elem1591 = iprot.readString(); - struct.group_names.add(_elem1591); + _elem1599 = iprot.readString(); + struct.group_names.add(_elem1599); } iprot.readListEnd(); } @@ -121949,9 +122235,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for 
(String _iter1593 : struct.group_names) + for (String _iter1601 : struct.group_names) { - oprot.writeString(_iter1593); + oprot.writeString(_iter1601); } oprot.writeListEnd(); } @@ -122006,9 +122292,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1594 : struct.group_names) + for (String _iter1602 : struct.group_names) { - oprot.writeString(_iter1594); + oprot.writeString(_iter1602); } } } @@ -122036,13 +122322,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1595.size); - String _elem1596; - for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597) + org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1603.size); + String _elem1604; + for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) { - _elem1596 = iprot.readString(); - struct.group_names.add(_elem1596); + _elem1604 = iprot.readString(); + struct.group_names.add(_elem1604); } } struct.setGroup_namesIsSet(true); @@ -122529,14 +122815,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); - struct.success = new ArrayList(_list1598.size); - Partition _elem1599; - for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) + org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); + struct.success = new ArrayList(_list1606.size); + Partition _elem1607; + for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) { - 
_elem1599 = new Partition(); - _elem1599.read(iprot); - struct.success.add(_elem1599); + _elem1607 = new Partition(); + _elem1607.read(iprot); + struct.success.add(_elem1607); } iprot.readListEnd(); } @@ -122580,9 +122866,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1601 : struct.success) + for (Partition _iter1609 : struct.success) { - _iter1601.write(oprot); + _iter1609.write(oprot); } oprot.writeListEnd(); } @@ -122629,9 +122915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1602 : struct.success) + for (Partition _iter1610 : struct.success) { - _iter1602.write(oprot); + _iter1610.write(oprot); } } } @@ -122649,14 +122935,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1603.size); - Partition _elem1604; - for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) + org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1611.size); + Partition _elem1612; + for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1604 = new Partition(); - _elem1604.read(iprot); - struct.success.add(_elem1604); + _elem1612 = new Partition(); + _elem1612.read(iprot); + struct.success.add(_elem1612); } } struct.setSuccessIsSet(true); @@ -123719,14 +124005,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); - struct.success = new ArrayList(_list1606.size); - PartitionSpec _elem1607; - for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.success = new ArrayList(_list1614.size); + PartitionSpec _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) { - _elem1607 = new PartitionSpec(); - _elem1607.read(iprot); - struct.success.add(_elem1607); + _elem1615 = new PartitionSpec(); + _elem1615.read(iprot); + struct.success.add(_elem1615); } iprot.readListEnd(); } @@ -123770,9 +124056,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1609 : struct.success) + for (PartitionSpec _iter1617 : struct.success) { - _iter1609.write(oprot); + _iter1617.write(oprot); } oprot.writeListEnd(); } @@ -123819,9 +124105,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1610 : struct.success) + for (PartitionSpec _iter1618 : struct.success) { - _iter1610.write(oprot); + _iter1618.write(oprot); } } } @@ -123839,14 +124125,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1611.size); - PartitionSpec _elem1612; - for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) + 
org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1619.size); + PartitionSpec _elem1620; + for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) { - _elem1612 = new PartitionSpec(); - _elem1612.read(iprot); - struct.success.add(_elem1612); + _elem1620 = new PartitionSpec(); + _elem1620.read(iprot); + struct.success.add(_elem1620); } } struct.setSuccessIsSet(true); @@ -124906,13 +125192,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); - struct.success = new ArrayList(_list1614.size); - String _elem1615; - for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); + struct.success = new ArrayList(_list1622.size); + String _elem1623; + for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) { - _elem1615 = iprot.readString(); - struct.success.add(_elem1615); + _elem1623 = iprot.readString(); + struct.success.add(_elem1623); } iprot.readListEnd(); } @@ -124956,9 +125242,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1617 : struct.success) + for (String _iter1625 : struct.success) { - oprot.writeString(_iter1617); + oprot.writeString(_iter1625); } oprot.writeListEnd(); } @@ -125005,9 +125291,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1618 : struct.success) + for (String _iter1626 : struct.success) { - oprot.writeString(_iter1618); + 
oprot.writeString(_iter1626); } } } @@ -125025,13 +125311,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1619.size); - String _elem1620; - for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1627.size); + String _elem1628; + for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) { - _elem1620 = iprot.readString(); - struct.success.add(_elem1620); + _elem1628 = iprot.readString(); + struct.success.add(_elem1628); } } struct.setSuccessIsSet(true); @@ -126562,13 +126848,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1622.size); - String _elem1623; - for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) + org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1630.size); + String _elem1631; + for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) { - _elem1623 = iprot.readString(); - struct.part_vals.add(_elem1623); + _elem1631 = iprot.readString(); + struct.part_vals.add(_elem1631); } iprot.readListEnd(); } @@ -126612,9 +126898,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1625 : 
struct.part_vals) + for (String _iter1633 : struct.part_vals) { - oprot.writeString(_iter1625); + oprot.writeString(_iter1633); } oprot.writeListEnd(); } @@ -126663,9 +126949,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1626 : struct.part_vals) + for (String _iter1634 : struct.part_vals) { - oprot.writeString(_iter1626); + oprot.writeString(_iter1634); } } } @@ -126688,13 +126974,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1627.size); - String _elem1628; - for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) + org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1635.size); + String _elem1636; + for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) { - _elem1628 = iprot.readString(); - struct.part_vals.add(_elem1628); + _elem1636 = iprot.readString(); + struct.part_vals.add(_elem1636); } } struct.setPart_valsIsSet(true); @@ -127185,14 +127471,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); - struct.success = new ArrayList(_list1630.size); - Partition _elem1631; - for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) + org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); + struct.success = new ArrayList(_list1638.size); + Partition _elem1639; + for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) { - _elem1631 = new Partition(); - 
_elem1631.read(iprot); - struct.success.add(_elem1631); + _elem1639 = new Partition(); + _elem1639.read(iprot); + struct.success.add(_elem1639); } iprot.readListEnd(); } @@ -127236,9 +127522,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1633 : struct.success) + for (Partition _iter1641 : struct.success) { - _iter1633.write(oprot); + _iter1641.write(oprot); } oprot.writeListEnd(); } @@ -127285,9 +127571,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1634 : struct.success) + for (Partition _iter1642 : struct.success) { - _iter1634.write(oprot); + _iter1642.write(oprot); } } } @@ -127305,14 +127591,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1635.size); - Partition _elem1636; - for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) + org.apache.thrift.protocol.TList _list1643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1643.size); + Partition _elem1644; + for (int _i1645 = 0; _i1645 < _list1643.size; ++_i1645) { - _elem1636 = new Partition(); - _elem1636.read(iprot); - struct.success.add(_elem1636); + _elem1644 = new Partition(); + _elem1644.read(iprot); + struct.success.add(_elem1644); } } struct.setSuccessIsSet(true); @@ -128084,13 +128370,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // 
PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1638.size); - String _elem1639; - for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) + org.apache.thrift.protocol.TList _list1646 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1646.size); + String _elem1647; + for (int _i1648 = 0; _i1648 < _list1646.size; ++_i1648) { - _elem1639 = iprot.readString(); - struct.part_vals.add(_elem1639); + _elem1647 = iprot.readString(); + struct.part_vals.add(_elem1647); } iprot.readListEnd(); } @@ -128118,13 +128404,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1641 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1641.size); - String _elem1642; - for (int _i1643 = 0; _i1643 < _list1641.size; ++_i1643) + org.apache.thrift.protocol.TList _list1649 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1649.size); + String _elem1650; + for (int _i1651 = 0; _i1651 < _list1649.size; ++_i1651) { - _elem1642 = iprot.readString(); - struct.group_names.add(_elem1642); + _elem1650 = iprot.readString(); + struct.group_names.add(_elem1650); } iprot.readListEnd(); } @@ -128160,9 +128446,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1644 : struct.part_vals) + for (String _iter1652 : struct.part_vals) { - oprot.writeString(_iter1644); + oprot.writeString(_iter1652); } oprot.writeListEnd(); } @@ -128180,9 +128466,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1645 : struct.group_names) + for (String _iter1653 : struct.group_names) { - oprot.writeString(_iter1645); + oprot.writeString(_iter1653); } oprot.writeListEnd(); } @@ -128234,9 +128520,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1646 : struct.part_vals) + for (String _iter1654 : struct.part_vals) { - oprot.writeString(_iter1646); + oprot.writeString(_iter1654); } } } @@ -128249,9 +128535,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1647 : struct.group_names) + for (String _iter1655 : struct.group_names) { - oprot.writeString(_iter1647); + oprot.writeString(_iter1655); } } } @@ -128271,13 +128557,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1648 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1648.size); - String _elem1649; - for (int _i1650 = 0; _i1650 < _list1648.size; ++_i1650) + org.apache.thrift.protocol.TList _list1656 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1656.size); + String _elem1657; + for (int _i1658 = 0; _i1658 < _list1656.size; ++_i1658) { - _elem1649 = iprot.readString(); - struct.part_vals.add(_elem1649); + _elem1657 = iprot.readString(); + struct.part_vals.add(_elem1657); } } struct.setPart_valsIsSet(true); @@ -128292,13 +128578,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1651.size); - String _elem1652; - for (int _i1653 = 0; _i1653 < _list1651.size; ++_i1653) + org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1659.size); + String _elem1660; + for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) { - _elem1652 = iprot.readString(); - struct.group_names.add(_elem1652); + _elem1660 = iprot.readString(); + struct.group_names.add(_elem1660); } } struct.setGroup_namesIsSet(true); @@ -128785,14 +129071,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1654 = iprot.readListBegin(); - struct.success = new ArrayList(_list1654.size); - Partition _elem1655; - for (int _i1656 = 0; _i1656 < _list1654.size; ++_i1656) + org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); + struct.success = new ArrayList(_list1662.size); + Partition _elem1663; + for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) { - _elem1655 = new Partition(); - _elem1655.read(iprot); - struct.success.add(_elem1655); + _elem1663 = new Partition(); + _elem1663.read(iprot); + struct.success.add(_elem1663); } iprot.readListEnd(); } @@ -128836,9 +129122,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1657 : struct.success) + for (Partition _iter1665 : struct.success) { - _iter1657.write(oprot); + 
_iter1665.write(oprot); } oprot.writeListEnd(); } @@ -128885,9 +129171,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1658 : struct.success) + for (Partition _iter1666 : struct.success) { - _iter1658.write(oprot); + _iter1666.write(oprot); } } } @@ -128905,14 +129191,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1659.size); - Partition _elem1660; - for (int _i1661 = 0; _i1661 < _list1659.size; ++_i1661) + org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1667.size); + Partition _elem1668; + for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) { - _elem1660 = new Partition(); - _elem1660.read(iprot); - struct.success.add(_elem1660); + _elem1668 = new Partition(); + _elem1668.read(iprot); + struct.success.add(_elem1668); } } struct.setSuccessIsSet(true); @@ -130443,13 +130729,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1662 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1662.size); - String _elem1663; - for (int _i1664 = 0; _i1664 < _list1662.size; ++_i1664) + org.apache.thrift.protocol.TList _list1670 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1670.size); + String _elem1671; + for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672) { - _elem1663 = iprot.readString(); - struct.part_vals.add(_elem1663); + _elem1671 = 
iprot.readString(); + struct.part_vals.add(_elem1671); } iprot.readListEnd(); } @@ -130493,9 +130779,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1665 : struct.part_vals) + for (String _iter1673 : struct.part_vals) { - oprot.writeString(_iter1665); + oprot.writeString(_iter1673); } oprot.writeListEnd(); } @@ -130544,9 +130830,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1666 : struct.part_vals) + for (String _iter1674 : struct.part_vals) { - oprot.writeString(_iter1666); + oprot.writeString(_iter1674); } } } @@ -130569,13 +130855,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1667.size); - String _elem1668; - for (int _i1669 = 0; _i1669 < _list1667.size; ++_i1669) + org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1675.size); + String _elem1676; + for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677) { - _elem1668 = iprot.readString(); - struct.part_vals.add(_elem1668); + _elem1676 = iprot.readString(); + struct.part_vals.add(_elem1676); } } struct.setPart_valsIsSet(true); @@ -131063,13 +131349,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1670 = 
iprot.readListBegin(); - struct.success = new ArrayList(_list1670.size); - String _elem1671; - for (int _i1672 = 0; _i1672 < _list1670.size; ++_i1672) + org.apache.thrift.protocol.TList _list1678 = iprot.readListBegin(); + struct.success = new ArrayList(_list1678.size); + String _elem1679; + for (int _i1680 = 0; _i1680 < _list1678.size; ++_i1680) { - _elem1671 = iprot.readString(); - struct.success.add(_elem1671); + _elem1679 = iprot.readString(); + struct.success.add(_elem1679); } iprot.readListEnd(); } @@ -131113,9 +131399,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1673 : struct.success) + for (String _iter1681 : struct.success) { - oprot.writeString(_iter1673); + oprot.writeString(_iter1681); } oprot.writeListEnd(); } @@ -131162,9 +131448,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1674 : struct.success) + for (String _iter1682 : struct.success) { - oprot.writeString(_iter1674); + oprot.writeString(_iter1682); } } } @@ -131182,13 +131468,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1675.size); - String _elem1676; - for (int _i1677 = 0; _i1677 < _list1675.size; ++_i1677) + org.apache.thrift.protocol.TList _list1683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1683.size); + String _elem1684; + for (int _i1685 = 0; _i1685 < _list1683.size; 
++_i1685) { - _elem1676 = iprot.readString(); - struct.success.add(_elem1676); + _elem1684 = iprot.readString(); + struct.success.add(_elem1684); } } struct.setSuccessIsSet(true); @@ -132983,13 +133269,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1678 = iprot.readListBegin(); - struct.success = new ArrayList(_list1678.size); - String _elem1679; - for (int _i1680 = 0; _i1680 < _list1678.size; ++_i1680) + org.apache.thrift.protocol.TList _list1686 = iprot.readListBegin(); + struct.success = new ArrayList(_list1686.size); + String _elem1687; + for (int _i1688 = 0; _i1688 < _list1686.size; ++_i1688) { - _elem1679 = iprot.readString(); - struct.success.add(_elem1679); + _elem1687 = iprot.readString(); + struct.success.add(_elem1687); } iprot.readListEnd(); } @@ -133033,9 +133319,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1681 : struct.success) + for (String _iter1689 : struct.success) { - oprot.writeString(_iter1681); + oprot.writeString(_iter1689); } oprot.writeListEnd(); } @@ -133082,9 +133368,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1682 : struct.success) + for (String _iter1690 : struct.success) { - oprot.writeString(_iter1682); + oprot.writeString(_iter1690); } } } @@ -133102,13 +133388,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1683 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1683.size); - String _elem1684; - for (int _i1685 = 0; _i1685 < _list1683.size; ++_i1685) + org.apache.thrift.protocol.TList _list1691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1691.size); + String _elem1692; + for (int _i1693 = 0; _i1693 < _list1691.size; ++_i1693) { - _elem1684 = iprot.readString(); - struct.success.add(_elem1684); + _elem1692 = iprot.readString(); + struct.success.add(_elem1692); } } struct.setSuccessIsSet(true); @@ -134275,14 +134561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1686 = iprot.readListBegin(); - struct.success = new ArrayList(_list1686.size); - Partition _elem1687; - for (int _i1688 = 0; _i1688 < _list1686.size; ++_i1688) + org.apache.thrift.protocol.TList _list1694 = iprot.readListBegin(); + struct.success = new ArrayList(_list1694.size); + Partition _elem1695; + for (int _i1696 = 0; _i1696 < _list1694.size; ++_i1696) { - _elem1687 = new Partition(); - _elem1687.read(iprot); - struct.success.add(_elem1687); + _elem1695 = new Partition(); + _elem1695.read(iprot); + struct.success.add(_elem1695); } iprot.readListEnd(); } @@ -134326,9 +134612,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1689 : struct.success) + for (Partition _iter1697 : struct.success) { - _iter1689.write(oprot); + _iter1697.write(oprot); } oprot.writeListEnd(); } @@ -134375,9 +134661,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1690 : struct.success) + for (Partition _iter1698 : struct.success) { - _iter1690.write(oprot); + _iter1698.write(oprot); } } } @@ -134395,14 +134681,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1691.size); - Partition _elem1692; - for (int _i1693 = 0; _i1693 < _list1691.size; ++_i1693) + org.apache.thrift.protocol.TList _list1699 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1699.size); + Partition _elem1700; + for (int _i1701 = 0; _i1701 < _list1699.size; ++_i1701) { - _elem1692 = new Partition(); - _elem1692.read(iprot); - struct.success.add(_elem1692); + _elem1700 = new Partition(); + _elem1700.read(iprot); + struct.success.add(_elem1700); } } struct.setSuccessIsSet(true); @@ -135569,14 +135855,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1694 = iprot.readListBegin(); - struct.success = new ArrayList(_list1694.size); - PartitionSpec _elem1695; - for (int _i1696 = 0; _i1696 < _list1694.size; ++_i1696) + org.apache.thrift.protocol.TList _list1702 = iprot.readListBegin(); + struct.success = new ArrayList(_list1702.size); + PartitionSpec _elem1703; + for (int _i1704 = 0; _i1704 < _list1702.size; ++_i1704) { - _elem1695 = new PartitionSpec(); - _elem1695.read(iprot); - struct.success.add(_elem1695); + _elem1703 = new PartitionSpec(); + _elem1703.read(iprot); + struct.success.add(_elem1703); } iprot.readListEnd(); } @@ 
-135620,9 +135906,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1697 : struct.success) + for (PartitionSpec _iter1705 : struct.success) { - _iter1697.write(oprot); + _iter1705.write(oprot); } oprot.writeListEnd(); } @@ -135669,9 +135955,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1698 : struct.success) + for (PartitionSpec _iter1706 : struct.success) { - _iter1698.write(oprot); + _iter1706.write(oprot); } } } @@ -135689,14 +135975,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1699 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1699.size); - PartitionSpec _elem1700; - for (int _i1701 = 0; _i1701 < _list1699.size; ++_i1701) + org.apache.thrift.protocol.TList _list1707 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1707.size); + PartitionSpec _elem1708; + for (int _i1709 = 0; _i1709 < _list1707.size; ++_i1709) { - _elem1700 = new PartitionSpec(); - _elem1700.read(iprot); - struct.success.add(_elem1700); + _elem1708 = new PartitionSpec(); + _elem1708.read(iprot); + struct.success.add(_elem1708); } } struct.setSuccessIsSet(true); @@ -139218,13 +139504,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1702 = iprot.readListBegin(); 
- struct.names = new ArrayList(_list1702.size); - String _elem1703; - for (int _i1704 = 0; _i1704 < _list1702.size; ++_i1704) + org.apache.thrift.protocol.TList _list1710 = iprot.readListBegin(); + struct.names = new ArrayList(_list1710.size); + String _elem1711; + for (int _i1712 = 0; _i1712 < _list1710.size; ++_i1712) { - _elem1703 = iprot.readString(); - struct.names.add(_elem1703); + _elem1711 = iprot.readString(); + struct.names.add(_elem1711); } iprot.readListEnd(); } @@ -139260,9 +139546,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1705 : struct.names) + for (String _iter1713 : struct.names) { - oprot.writeString(_iter1705); + oprot.writeString(_iter1713); } oprot.writeListEnd(); } @@ -139305,9 +139591,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1706 : struct.names) + for (String _iter1714 : struct.names) { - oprot.writeString(_iter1706); + oprot.writeString(_iter1714); } } } @@ -139327,13 +139613,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1707 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1707.size); - String _elem1708; - for (int _i1709 = 0; _i1709 < _list1707.size; ++_i1709) + org.apache.thrift.protocol.TList _list1715 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1715.size); + String _elem1716; + for (int _i1717 = 0; _i1717 < _list1715.size; ++_i1717) { - _elem1708 = iprot.readString(); - struct.names.add(_elem1708); + 
_elem1716 = iprot.readString(); + struct.names.add(_elem1716); } } struct.setNamesIsSet(true); @@ -139820,14 +140106,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1710 = iprot.readListBegin(); - struct.success = new ArrayList(_list1710.size); - Partition _elem1711; - for (int _i1712 = 0; _i1712 < _list1710.size; ++_i1712) + org.apache.thrift.protocol.TList _list1718 = iprot.readListBegin(); + struct.success = new ArrayList(_list1718.size); + Partition _elem1719; + for (int _i1720 = 0; _i1720 < _list1718.size; ++_i1720) { - _elem1711 = new Partition(); - _elem1711.read(iprot); - struct.success.add(_elem1711); + _elem1719 = new Partition(); + _elem1719.read(iprot); + struct.success.add(_elem1719); } iprot.readListEnd(); } @@ -139871,9 +140157,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1713 : struct.success) + for (Partition _iter1721 : struct.success) { - _iter1713.write(oprot); + _iter1721.write(oprot); } oprot.writeListEnd(); } @@ -139920,9 +140206,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1714 : struct.success) + for (Partition _iter1722 : struct.success) { - _iter1714.write(oprot); + _iter1722.write(oprot); } } } @@ -139940,14 +140226,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1715 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - 
struct.success = new ArrayList(_list1715.size); - Partition _elem1716; - for (int _i1717 = 0; _i1717 < _list1715.size; ++_i1717) + org.apache.thrift.protocol.TList _list1723 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1723.size); + Partition _elem1724; + for (int _i1725 = 0; _i1725 < _list1723.size; ++_i1725) { - _elem1716 = new Partition(); - _elem1716.read(iprot); - struct.success.add(_elem1716); + _elem1724 = new Partition(); + _elem1724.read(iprot); + struct.success.add(_elem1724); } } struct.setSuccessIsSet(true); @@ -142435,14 +142721,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1718 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1718.size); - Partition _elem1719; - for (int _i1720 = 0; _i1720 < _list1718.size; ++_i1720) + org.apache.thrift.protocol.TList _list1726 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1726.size); + Partition _elem1727; + for (int _i1728 = 0; _i1728 < _list1726.size; ++_i1728) { - _elem1719 = new Partition(); - _elem1719.read(iprot); - struct.new_parts.add(_elem1719); + _elem1727 = new Partition(); + _elem1727.read(iprot); + struct.new_parts.add(_elem1727); } iprot.readListEnd(); } @@ -142478,9 +142764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1721 : struct.new_parts) + for (Partition _iter1729 : struct.new_parts) { - _iter1721.write(oprot); + _iter1729.write(oprot); } oprot.writeListEnd(); } @@ -142523,9 +142809,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if 
(struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1722 : struct.new_parts) + for (Partition _iter1730 : struct.new_parts) { - _iter1722.write(oprot); + _iter1730.write(oprot); } } } @@ -142545,14 +142831,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1723 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1723.size); - Partition _elem1724; - for (int _i1725 = 0; _i1725 < _list1723.size; ++_i1725) + org.apache.thrift.protocol.TList _list1731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1731.size); + Partition _elem1732; + for (int _i1733 = 0; _i1733 < _list1731.size; ++_i1733) { - _elem1724 = new Partition(); - _elem1724.read(iprot); - struct.new_parts.add(_elem1724); + _elem1732 = new Partition(); + _elem1732.read(iprot); + struct.new_parts.add(_elem1732); } } struct.setNew_partsIsSet(true); @@ -143605,14 +143891,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1726 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1726.size); - Partition _elem1727; - for (int _i1728 = 0; _i1728 < _list1726.size; ++_i1728) + org.apache.thrift.protocol.TList _list1734 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1734.size); + Partition _elem1735; + for (int _i1736 = 0; _i1736 < _list1734.size; ++_i1736) { - _elem1727 = new Partition(); - _elem1727.read(iprot); - struct.new_parts.add(_elem1727); + _elem1735 = new Partition(); + _elem1735.read(iprot); + struct.new_parts.add(_elem1735); } iprot.readListEnd(); } @@ -143657,9 +143943,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1729 : struct.new_parts) + for (Partition _iter1737 : struct.new_parts) { - _iter1729.write(oprot); + _iter1737.write(oprot); } oprot.writeListEnd(); } @@ -143710,9 +143996,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1730 : struct.new_parts) + for (Partition _iter1738 : struct.new_parts) { - _iter1730.write(oprot); + _iter1738.write(oprot); } } } @@ -143735,14 +144021,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1731.size); - Partition _elem1732; - for (int _i1733 = 0; _i1733 < _list1731.size; ++_i1733) + org.apache.thrift.protocol.TList _list1739 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1739.size); + Partition _elem1740; + for (int _i1741 = 0; _i1741 < _list1739.size; ++_i1741) { - _elem1732 = new Partition(); - _elem1732.read(iprot); - struct.new_parts.add(_elem1732); + _elem1740 = new Partition(); + _elem1740.read(iprot); + struct.new_parts.add(_elem1740); } } struct.setNew_partsIsSet(true); @@ -146881,13 +147167,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1734 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1734.size); - String _elem1735; - 
for (int _i1736 = 0; _i1736 < _list1734.size; ++_i1736) + org.apache.thrift.protocol.TList _list1742 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1742.size); + String _elem1743; + for (int _i1744 = 0; _i1744 < _list1742.size; ++_i1744) { - _elem1735 = iprot.readString(); - struct.part_vals.add(_elem1735); + _elem1743 = iprot.readString(); + struct.part_vals.add(_elem1743); } iprot.readListEnd(); } @@ -146932,9 +147218,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1737 : struct.part_vals) + for (String _iter1745 : struct.part_vals) { - oprot.writeString(_iter1737); + oprot.writeString(_iter1745); } oprot.writeListEnd(); } @@ -146985,9 +147271,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1738 : struct.part_vals) + for (String _iter1746 : struct.part_vals) { - oprot.writeString(_iter1738); + oprot.writeString(_iter1746); } } } @@ -147010,13 +147296,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1739 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1739.size); - String _elem1740; - for (int _i1741 = 0; _i1741 < _list1739.size; ++_i1741) + org.apache.thrift.protocol.TList _list1747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1747.size); + String _elem1748; + for (int _i1749 = 0; _i1749 < _list1747.size; ++_i1749) { - _elem1740 = iprot.readString(); - struct.part_vals.add(_elem1740); + _elem1748 = 
iprot.readString(); + struct.part_vals.add(_elem1748); } } struct.setPart_valsIsSet(true); @@ -148828,13 +149114,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1742 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1742.size); - String _elem1743; - for (int _i1744 = 0; _i1744 < _list1742.size; ++_i1744) + org.apache.thrift.protocol.TList _list1750 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1750.size); + String _elem1751; + for (int _i1752 = 0; _i1752 < _list1750.size; ++_i1752) { - _elem1743 = iprot.readString(); - struct.part_vals.add(_elem1743); + _elem1751 = iprot.readString(); + struct.part_vals.add(_elem1751); } iprot.readListEnd(); } @@ -148868,9 +149154,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1745 : struct.part_vals) + for (String _iter1753 : struct.part_vals) { - oprot.writeString(_iter1745); + oprot.writeString(_iter1753); } oprot.writeListEnd(); } @@ -148907,9 +149193,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1746 : struct.part_vals) + for (String _iter1754 : struct.part_vals) { - oprot.writeString(_iter1746); + oprot.writeString(_iter1754); } } } @@ -148924,13 +149210,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new 
ArrayList(_list1747.size); - String _elem1748; - for (int _i1749 = 0; _i1749 < _list1747.size; ++_i1749) + org.apache.thrift.protocol.TList _list1755 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1755.size); + String _elem1756; + for (int _i1757 = 0; _i1757 < _list1755.size; ++_i1757) { - _elem1748 = iprot.readString(); - struct.part_vals.add(_elem1748); + _elem1756 = iprot.readString(); + struct.part_vals.add(_elem1756); } } struct.setPart_valsIsSet(true); @@ -151085,13 +151371,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1750 = iprot.readListBegin(); - struct.success = new ArrayList(_list1750.size); - String _elem1751; - for (int _i1752 = 0; _i1752 < _list1750.size; ++_i1752) + org.apache.thrift.protocol.TList _list1758 = iprot.readListBegin(); + struct.success = new ArrayList(_list1758.size); + String _elem1759; + for (int _i1760 = 0; _i1760 < _list1758.size; ++_i1760) { - _elem1751 = iprot.readString(); - struct.success.add(_elem1751); + _elem1759 = iprot.readString(); + struct.success.add(_elem1759); } iprot.readListEnd(); } @@ -151126,9 +151412,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1753 : struct.success) + for (String _iter1761 : struct.success) { - oprot.writeString(_iter1753); + oprot.writeString(_iter1761); } oprot.writeListEnd(); } @@ -151167,9 +151453,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1754 : struct.success) + for (String 
_iter1762 : struct.success) { - oprot.writeString(_iter1754); + oprot.writeString(_iter1762); } } } @@ -151184,13 +151470,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1755 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1755.size); - String _elem1756; - for (int _i1757 = 0; _i1757 < _list1755.size; ++_i1757) + org.apache.thrift.protocol.TList _list1763 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1763.size); + String _elem1764; + for (int _i1765 = 0; _i1765 < _list1763.size; ++_i1765) { - _elem1756 = iprot.readString(); - struct.success.add(_elem1756); + _elem1764 = iprot.readString(); + struct.success.add(_elem1764); } } struct.setSuccessIsSet(true); @@ -151953,15 +152239,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1758 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1758.size); - String _key1759; - String _val1760; - for (int _i1761 = 0; _i1761 < _map1758.size; ++_i1761) + org.apache.thrift.protocol.TMap _map1766 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1766.size); + String _key1767; + String _val1768; + for (int _i1769 = 0; _i1769 < _map1766.size; ++_i1769) { - _key1759 = iprot.readString(); - _val1760 = iprot.readString(); - struct.success.put(_key1759, _val1760); + _key1767 = iprot.readString(); + _val1768 = iprot.readString(); + struct.success.put(_key1767, _val1768); } iprot.readMapEnd(); } @@ -151996,10 +152282,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { 
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1762 : struct.success.entrySet()) + for (Map.Entry _iter1770 : struct.success.entrySet()) { - oprot.writeString(_iter1762.getKey()); - oprot.writeString(_iter1762.getValue()); + oprot.writeString(_iter1770.getKey()); + oprot.writeString(_iter1770.getValue()); } oprot.writeMapEnd(); } @@ -152038,10 +152324,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1763 : struct.success.entrySet()) + for (Map.Entry _iter1771 : struct.success.entrySet()) { - oprot.writeString(_iter1763.getKey()); - oprot.writeString(_iter1763.getValue()); + oprot.writeString(_iter1771.getKey()); + oprot.writeString(_iter1771.getValue()); } } } @@ -152056,15 +152342,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1764 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1764.size); - String _key1765; - String _val1766; - for (int _i1767 = 0; _i1767 < _map1764.size; ++_i1767) + org.apache.thrift.protocol.TMap _map1772 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1772.size); + String _key1773; + String _val1774; + for (int _i1775 = 0; _i1775 < _map1772.size; ++_i1775) { - _key1765 = iprot.readString(); - _val1766 = iprot.readString(); - struct.success.put(_key1765, _val1766); + _key1773 = iprot.readString(); + _val1774 = iprot.readString(); + struct.success.put(_key1773, _val1774); } } 
struct.setSuccessIsSet(true); @@ -152659,15 +152945,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1768 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1768.size); - String _key1769; - String _val1770; - for (int _i1771 = 0; _i1771 < _map1768.size; ++_i1771) + org.apache.thrift.protocol.TMap _map1776 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1776.size); + String _key1777; + String _val1778; + for (int _i1779 = 0; _i1779 < _map1776.size; ++_i1779) { - _key1769 = iprot.readString(); - _val1770 = iprot.readString(); - struct.part_vals.put(_key1769, _val1770); + _key1777 = iprot.readString(); + _val1778 = iprot.readString(); + struct.part_vals.put(_key1777, _val1778); } iprot.readMapEnd(); } @@ -152711,10 +152997,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1772 : struct.part_vals.entrySet()) + for (Map.Entry _iter1780 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1772.getKey()); - oprot.writeString(_iter1772.getValue()); + oprot.writeString(_iter1780.getKey()); + oprot.writeString(_iter1780.getValue()); } oprot.writeMapEnd(); } @@ -152765,10 +153051,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1773 : struct.part_vals.entrySet()) + for (Map.Entry _iter1781 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1773.getKey()); - oprot.writeString(_iter1773.getValue()); + oprot.writeString(_iter1781.getKey()); + oprot.writeString(_iter1781.getValue()); 
} } } @@ -152791,15 +153077,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1774 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1774.size); - String _key1775; - String _val1776; - for (int _i1777 = 0; _i1777 < _map1774.size; ++_i1777) + org.apache.thrift.protocol.TMap _map1782 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1782.size); + String _key1783; + String _val1784; + for (int _i1785 = 0; _i1785 < _map1782.size; ++_i1785) { - _key1775 = iprot.readString(); - _val1776 = iprot.readString(); - struct.part_vals.put(_key1775, _val1776); + _key1783 = iprot.readString(); + _val1784 = iprot.readString(); + struct.part_vals.put(_key1783, _val1784); } } struct.setPart_valsIsSet(true); @@ -154283,15 +154569,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1778 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1778.size); - String _key1779; - String _val1780; - for (int _i1781 = 0; _i1781 < _map1778.size; ++_i1781) + org.apache.thrift.protocol.TMap _map1786 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1786.size); + String _key1787; + String _val1788; + for (int _i1789 = 0; _i1789 < _map1786.size; ++_i1789) { - _key1779 = iprot.readString(); - _val1780 = iprot.readString(); - struct.part_vals.put(_key1779, _val1780); + _key1787 = iprot.readString(); + _val1788 = iprot.readString(); + struct.part_vals.put(_key1787, _val1788); } iprot.readMapEnd(); } @@ -154335,10 +154621,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1782 : struct.part_vals.entrySet()) + for (Map.Entry _iter1790 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1782.getKey()); - oprot.writeString(_iter1782.getValue()); + oprot.writeString(_iter1790.getKey()); + oprot.writeString(_iter1790.getValue()); } oprot.writeMapEnd(); } @@ -154389,10 +154675,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1783 : struct.part_vals.entrySet()) + for (Map.Entry _iter1791 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1783.getKey()); - oprot.writeString(_iter1783.getValue()); + oprot.writeString(_iter1791.getKey()); + oprot.writeString(_iter1791.getValue()); } } } @@ -154415,15 +154701,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1784 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1784.size); - String _key1785; - String _val1786; - for (int _i1787 = 0; _i1787 < _map1784.size; ++_i1787) + org.apache.thrift.protocol.TMap _map1792 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1792.size); + String _key1793; + String _val1794; + for (int _i1795 = 0; _i1795 < _map1792.size; ++_i1795) { - _key1785 = iprot.readString(); - _val1786 = iprot.readString(); - struct.part_vals.put(_key1785, _val1786); + _key1793 = 
iprot.readString(); + _val1794 = iprot.readString(); + struct.part_vals.put(_key1793, _val1794); } } struct.setPart_valsIsSet(true); @@ -179287,13 +179573,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1788 = iprot.readListBegin(); - struct.success = new ArrayList(_list1788.size); - String _elem1789; - for (int _i1790 = 0; _i1790 < _list1788.size; ++_i1790) + org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); + struct.success = new ArrayList(_list1796.size); + String _elem1797; + for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) { - _elem1789 = iprot.readString(); - struct.success.add(_elem1789); + _elem1797 = iprot.readString(); + struct.success.add(_elem1797); } iprot.readListEnd(); } @@ -179328,9 +179614,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1791 : struct.success) + for (String _iter1799 : struct.success) { - oprot.writeString(_iter1791); + oprot.writeString(_iter1799); } oprot.writeListEnd(); } @@ -179369,9 +179655,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1792 : struct.success) + for (String _iter1800 : struct.success) { - oprot.writeString(_iter1792); + oprot.writeString(_iter1800); } } } @@ -179386,13 +179672,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - 
struct.success = new ArrayList(_list1793.size); - String _elem1794; - for (int _i1795 = 0; _i1795 < _list1793.size; ++_i1795) + org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1801.size); + String _elem1802; + for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803) { - _elem1794 = iprot.readString(); - struct.success.add(_elem1794); + _elem1802 = iprot.readString(); + struct.success.add(_elem1802); } } struct.setSuccessIsSet(true); @@ -183447,13 +183733,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1796 = iprot.readListBegin(); - struct.success = new ArrayList(_list1796.size); - String _elem1797; - for (int _i1798 = 0; _i1798 < _list1796.size; ++_i1798) + org.apache.thrift.protocol.TList _list1804 = iprot.readListBegin(); + struct.success = new ArrayList(_list1804.size); + String _elem1805; + for (int _i1806 = 0; _i1806 < _list1804.size; ++_i1806) { - _elem1797 = iprot.readString(); - struct.success.add(_elem1797); + _elem1805 = iprot.readString(); + struct.success.add(_elem1805); } iprot.readListEnd(); } @@ -183488,9 +183774,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1799 : struct.success) + for (String _iter1807 : struct.success) { - oprot.writeString(_iter1799); + oprot.writeString(_iter1807); } oprot.writeListEnd(); } @@ -183529,9 +183815,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1800 : struct.success) + for 
(String _iter1808 : struct.success) { - oprot.writeString(_iter1800); + oprot.writeString(_iter1808); } } } @@ -183546,13 +183832,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1801.size); - String _elem1802; - for (int _i1803 = 0; _i1803 < _list1801.size; ++_i1803) + org.apache.thrift.protocol.TList _list1809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1809.size); + String _elem1810; + for (int _i1811 = 0; _i1811 < _list1809.size; ++_i1811) { - _elem1802 = iprot.readString(); - struct.success.add(_elem1802); + _elem1810 = iprot.readString(); + struct.success.add(_elem1810); } } struct.setSuccessIsSet(true); @@ -186843,14 +187129,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1804 = iprot.readListBegin(); - struct.success = new ArrayList(_list1804.size); - Role _elem1805; - for (int _i1806 = 0; _i1806 < _list1804.size; ++_i1806) + org.apache.thrift.protocol.TList _list1812 = iprot.readListBegin(); + struct.success = new ArrayList(_list1812.size); + Role _elem1813; + for (int _i1814 = 0; _i1814 < _list1812.size; ++_i1814) { - _elem1805 = new Role(); - _elem1805.read(iprot); - struct.success.add(_elem1805); + _elem1813 = new Role(); + _elem1813.read(iprot); + struct.success.add(_elem1813); } iprot.readListEnd(); } @@ -186885,9 +187171,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1807 : struct.success) + for (Role _iter1815 : struct.success) { - _iter1807.write(oprot); + _iter1815.write(oprot); } oprot.writeListEnd(); } @@ -186926,9 +187212,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1808 : struct.success) + for (Role _iter1816 : struct.success) { - _iter1808.write(oprot); + _iter1816.write(oprot); } } } @@ -186943,14 +187229,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1809.size); - Role _elem1810; - for (int _i1811 = 0; _i1811 < _list1809.size; ++_i1811) + org.apache.thrift.protocol.TList _list1817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1817.size); + Role _elem1818; + for (int _i1819 = 0; _i1819 < _list1817.size; ++_i1819) { - _elem1810 = new Role(); - _elem1810.read(iprot); - struct.success.add(_elem1810); + _elem1818 = new Role(); + _elem1818.read(iprot); + struct.success.add(_elem1818); } } struct.setSuccessIsSet(true); @@ -189955,13 +190241,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1812 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1812.size); - String _elem1813; - for (int _i1814 = 0; _i1814 < _list1812.size; ++_i1814) + org.apache.thrift.protocol.TList _list1820 = iprot.readListBegin(); + struct.group_names = new 
ArrayList(_list1820.size); + String _elem1821; + for (int _i1822 = 0; _i1822 < _list1820.size; ++_i1822) { - _elem1813 = iprot.readString(); - struct.group_names.add(_elem1813); + _elem1821 = iprot.readString(); + struct.group_names.add(_elem1821); } iprot.readListEnd(); } @@ -189997,9 +190283,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1815 : struct.group_names) + for (String _iter1823 : struct.group_names) { - oprot.writeString(_iter1815); + oprot.writeString(_iter1823); } oprot.writeListEnd(); } @@ -190042,9 +190328,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1816 : struct.group_names) + for (String _iter1824 : struct.group_names) { - oprot.writeString(_iter1816); + oprot.writeString(_iter1824); } } } @@ -190065,13 +190351,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1817.size); - String _elem1818; - for (int _i1819 = 0; _i1819 < _list1817.size; ++_i1819) + org.apache.thrift.protocol.TList _list1825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1825.size); + String _elem1826; + for (int _i1827 = 0; _i1827 < _list1825.size; ++_i1827) { - _elem1818 = iprot.readString(); - struct.group_names.add(_elem1818); + _elem1826 = iprot.readString(); + struct.group_names.add(_elem1826); } } struct.setGroup_namesIsSet(true); @@ -191529,14 +191815,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1820 = iprot.readListBegin(); - struct.success = new ArrayList(_list1820.size); - HiveObjectPrivilege _elem1821; - for (int _i1822 = 0; _i1822 < _list1820.size; ++_i1822) + org.apache.thrift.protocol.TList _list1828 = iprot.readListBegin(); + struct.success = new ArrayList(_list1828.size); + HiveObjectPrivilege _elem1829; + for (int _i1830 = 0; _i1830 < _list1828.size; ++_i1830) { - _elem1821 = new HiveObjectPrivilege(); - _elem1821.read(iprot); - struct.success.add(_elem1821); + _elem1829 = new HiveObjectPrivilege(); + _elem1829.read(iprot); + struct.success.add(_elem1829); } iprot.readListEnd(); } @@ -191571,9 +191857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1823 : struct.success) + for (HiveObjectPrivilege _iter1831 : struct.success) { - _iter1823.write(oprot); + _iter1831.write(oprot); } oprot.writeListEnd(); } @@ -191612,9 +191898,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1824 : struct.success) + for (HiveObjectPrivilege _iter1832 : struct.success) { - _iter1824.write(oprot); + _iter1832.write(oprot); } } } @@ -191629,14 +191915,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1825.size); - 
HiveObjectPrivilege _elem1826; - for (int _i1827 = 0; _i1827 < _list1825.size; ++_i1827) + org.apache.thrift.protocol.TList _list1833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1833.size); + HiveObjectPrivilege _elem1834; + for (int _i1835 = 0; _i1835 < _list1833.size; ++_i1835) { - _elem1826 = new HiveObjectPrivilege(); - _elem1826.read(iprot); - struct.success.add(_elem1826); + _elem1834 = new HiveObjectPrivilege(); + _elem1834.read(iprot); + struct.success.add(_elem1834); } } struct.setSuccessIsSet(true); @@ -195583,13 +195869,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1828 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1828.size); - String _elem1829; - for (int _i1830 = 0; _i1830 < _list1828.size; ++_i1830) + org.apache.thrift.protocol.TList _list1836 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1836.size); + String _elem1837; + for (int _i1838 = 0; _i1838 < _list1836.size; ++_i1838) { - _elem1829 = iprot.readString(); - struct.group_names.add(_elem1829); + _elem1837 = iprot.readString(); + struct.group_names.add(_elem1837); } iprot.readListEnd(); } @@ -195620,9 +195906,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1831 : struct.group_names) + for (String _iter1839 : struct.group_names) { - oprot.writeString(_iter1831); + oprot.writeString(_iter1839); } oprot.writeListEnd(); } @@ -195659,9 +195945,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { 
oprot.writeI32(struct.group_names.size()); - for (String _iter1832 : struct.group_names) + for (String _iter1840 : struct.group_names) { - oprot.writeString(_iter1832); + oprot.writeString(_iter1840); } } } @@ -195677,13 +195963,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1833.size); - String _elem1834; - for (int _i1835 = 0; _i1835 < _list1833.size; ++_i1835) + org.apache.thrift.protocol.TList _list1841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1841.size); + String _elem1842; + for (int _i1843 = 0; _i1843 < _list1841.size; ++_i1843) { - _elem1834 = iprot.readString(); - struct.group_names.add(_elem1834); + _elem1842 = iprot.readString(); + struct.group_names.add(_elem1842); } } struct.setGroup_namesIsSet(true); @@ -196086,13 +196372,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1836 = iprot.readListBegin(); - struct.success = new ArrayList(_list1836.size); - String _elem1837; - for (int _i1838 = 0; _i1838 < _list1836.size; ++_i1838) + org.apache.thrift.protocol.TList _list1844 = iprot.readListBegin(); + struct.success = new ArrayList(_list1844.size); + String _elem1845; + for (int _i1846 = 0; _i1846 < _list1844.size; ++_i1846) { - _elem1837 = iprot.readString(); - struct.success.add(_elem1837); + _elem1845 = iprot.readString(); + struct.success.add(_elem1845); } iprot.readListEnd(); } @@ -196127,9 +196413,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1839 : struct.success) + for (String _iter1847 : struct.success) { - oprot.writeString(_iter1839); + oprot.writeString(_iter1847); } oprot.writeListEnd(); } @@ -196168,9 +196454,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1840 : struct.success) + for (String _iter1848 : struct.success) { - oprot.writeString(_iter1840); + oprot.writeString(_iter1848); } } } @@ -196185,13 +196471,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1841.size); - String _elem1842; - for (int _i1843 = 0; _i1843 < _list1841.size; ++_i1843) + org.apache.thrift.protocol.TList _list1849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1849.size); + String _elem1850; + for (int _i1851 = 0; _i1851 < _list1849.size; ++_i1851) { - _elem1842 = iprot.readString(); - struct.success.add(_elem1842); + _elem1850 = iprot.readString(); + struct.success.add(_elem1850); } } struct.setSuccessIsSet(true); @@ -201482,13 +201768,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1844 = iprot.readListBegin(); - struct.success = new ArrayList(_list1844.size); - String _elem1845; - for (int _i1846 = 0; _i1846 < _list1844.size; ++_i1846) + org.apache.thrift.protocol.TList _list1852 = iprot.readListBegin(); + struct.success = 
new ArrayList(_list1852.size); + String _elem1853; + for (int _i1854 = 0; _i1854 < _list1852.size; ++_i1854) { - _elem1845 = iprot.readString(); - struct.success.add(_elem1845); + _elem1853 = iprot.readString(); + struct.success.add(_elem1853); } iprot.readListEnd(); } @@ -201514,9 +201800,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1847 : struct.success) + for (String _iter1855 : struct.success) { - oprot.writeString(_iter1847); + oprot.writeString(_iter1855); } oprot.writeListEnd(); } @@ -201547,9 +201833,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1848 : struct.success) + for (String _iter1856 : struct.success) { - oprot.writeString(_iter1848); + oprot.writeString(_iter1856); } } } @@ -201561,13 +201847,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1849.size); - String _elem1850; - for (int _i1851 = 0; _i1851 < _list1849.size; ++_i1851) + org.apache.thrift.protocol.TList _list1857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1857.size); + String _elem1858; + for (int _i1859 = 0; _i1859 < _list1857.size; ++_i1859) { - _elem1850 = iprot.readString(); - struct.success.add(_elem1850); + _elem1858 = iprot.readString(); + struct.success.add(_elem1858); } } struct.setSuccessIsSet(true); @@ -204597,13 +204883,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1852 = iprot.readListBegin(); - struct.success = new ArrayList(_list1852.size); - String _elem1853; - for (int _i1854 = 0; _i1854 < _list1852.size; ++_i1854) + org.apache.thrift.protocol.TList _list1860 = iprot.readListBegin(); + struct.success = new ArrayList(_list1860.size); + String _elem1861; + for (int _i1862 = 0; _i1862 < _list1860.size; ++_i1862) { - _elem1853 = iprot.readString(); - struct.success.add(_elem1853); + _elem1861 = iprot.readString(); + struct.success.add(_elem1861); } iprot.readListEnd(); } @@ -204629,9 +204915,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1855 : struct.success) + for (String _iter1863 : struct.success) { - oprot.writeString(_iter1855); + oprot.writeString(_iter1863); } oprot.writeListEnd(); } @@ -204662,9 +204948,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1856 : struct.success) + for (String _iter1864 : struct.success) { - oprot.writeString(_iter1856); + oprot.writeString(_iter1864); } } } @@ -204676,13 +204962,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1857.size); - String _elem1858; - for (int _i1859 = 0; _i1859 < _list1857.size; ++_i1859) + org.apache.thrift.protocol.TList _list1865 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1865.size); + String _elem1866; + for (int _i1867 = 0; _i1867 < _list1865.size; ++_i1867) { - _elem1858 = iprot.readString(); - struct.success.add(_elem1858); + _elem1866 = iprot.readString(); + struct.success.add(_elem1866); } } struct.setSuccessIsSet(true); @@ -221803,13 +222089,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1860 = iprot.readListBegin(); - struct.success = new ArrayList(_list1860.size); - String _elem1861; - for (int _i1862 = 0; _i1862 < _list1860.size; ++_i1862) + org.apache.thrift.protocol.TList _list1868 = iprot.readListBegin(); + struct.success = new ArrayList(_list1868.size); + String _elem1869; + for (int _i1870 = 0; _i1870 < _list1868.size; ++_i1870) { - _elem1861 = iprot.readString(); - struct.success.add(_elem1861); + _elem1869 = iprot.readString(); + struct.success.add(_elem1869); } iprot.readListEnd(); } @@ -221835,9 +222121,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1863 : struct.success) + for (String _iter1871 : struct.success) { - oprot.writeString(_iter1863); + oprot.writeString(_iter1871); } oprot.writeListEnd(); } @@ -221868,9 +222154,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1864 : struct.success) + for (String _iter1872 : struct.success) { - oprot.writeString(_iter1864); + oprot.writeString(_iter1872); } } } @@ -221882,13 +222168,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1865.size); - String _elem1866; - for (int _i1867 = 0; _i1867 < _list1865.size; ++_i1867) + org.apache.thrift.protocol.TList _list1873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1873.size); + String _elem1874; + for (int _i1875 = 0; _i1875 < _list1873.size; ++_i1875) { - _elem1866 = iprot.readString(); - struct.success.add(_elem1866); + _elem1874 = iprot.readString(); + struct.success.add(_elem1874); } } struct.setSuccessIsSet(true); @@ -258774,14 +259060,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1868 = iprot.readListBegin(); - struct.success = new ArrayList(_list1868.size); - SchemaVersion _elem1869; - for (int _i1870 = 0; _i1870 < _list1868.size; ++_i1870) + org.apache.thrift.protocol.TList _list1876 = iprot.readListBegin(); + struct.success = new ArrayList(_list1876.size); + SchemaVersion _elem1877; + for (int _i1878 = 0; _i1878 < _list1876.size; ++_i1878) { - _elem1869 = new SchemaVersion(); - _elem1869.read(iprot); - struct.success.add(_elem1869); + _elem1877 = new SchemaVersion(); + _elem1877.read(iprot); + struct.success.add(_elem1877); } iprot.readListEnd(); } @@ -258825,9 +259111,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1871 : struct.success) + for 
(SchemaVersion _iter1879 : struct.success) { - _iter1871.write(oprot); + _iter1879.write(oprot); } oprot.writeListEnd(); } @@ -258874,9 +259160,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1872 : struct.success) + for (SchemaVersion _iter1880 : struct.success) { - _iter1872.write(oprot); + _iter1880.write(oprot); } } } @@ -258894,14 +259180,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1873.size); - SchemaVersion _elem1874; - for (int _i1875 = 0; _i1875 < _list1873.size; ++_i1875) + org.apache.thrift.protocol.TList _list1881 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1881.size); + SchemaVersion _elem1882; + for (int _i1883 = 0; _i1883 < _list1881.size; ++_i1883) { - _elem1874 = new SchemaVersion(); - _elem1874.read(iprot); - struct.success.add(_elem1874); + _elem1882 = new SchemaVersion(); + _elem1882.read(iprot); + struct.success.add(_elem1882); } } struct.setSuccessIsSet(true); @@ -267444,14 +267730,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1876 = iprot.readListBegin(); - struct.success = new ArrayList(_list1876.size); - RuntimeStat _elem1877; - for (int _i1878 = 0; _i1878 < _list1876.size; ++_i1878) + org.apache.thrift.protocol.TList _list1884 = iprot.readListBegin(); + struct.success = new ArrayList(_list1884.size); + RuntimeStat _elem1885; + for (int _i1886 = 0; _i1886 < _list1884.size; 
++_i1886) { - _elem1877 = new RuntimeStat(); - _elem1877.read(iprot); - struct.success.add(_elem1877); + _elem1885 = new RuntimeStat(); + _elem1885.read(iprot); + struct.success.add(_elem1885); } iprot.readListEnd(); } @@ -267486,9 +267772,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1879 : struct.success) + for (RuntimeStat _iter1887 : struct.success) { - _iter1879.write(oprot); + _iter1887.write(oprot); } oprot.writeListEnd(); } @@ -267527,9 +267813,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1880 : struct.success) + for (RuntimeStat _iter1888 : struct.success) { - _iter1880.write(oprot); + _iter1888.write(oprot); } } } @@ -267544,14 +267830,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1881 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1881.size); - RuntimeStat _elem1882; - for (int _i1883 = 0; _i1883 < _list1881.size; ++_i1883) + org.apache.thrift.protocol.TList _list1889 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1889.size); + RuntimeStat _elem1890; + for (int _i1891 = 0; _i1891 < _list1889.size; ++_i1891) { - _elem1882 = new RuntimeStat(); - _elem1882.read(iprot); - struct.success.add(_elem1882); + _elem1890 = new RuntimeStat(); + _elem1890.read(iprot); + struct.success.add(_elem1890); } } struct.setSuccessIsSet(true); @@ -270051,9 +270337,1814 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("scheduled_query_maintenance_result("); + StringBuilder sb = new StringBuilder("scheduled_query_maintenance_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class scheduled_query_maintenance_resultStandardSchemeFactory implements SchemeFactory { + public scheduled_query_maintenance_resultStandardScheme getScheme() { + return new scheduled_query_maintenance_resultStandardScheme(); + } + } + + private static class scheduled_query_maintenance_resultStandardScheme 
extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new AlreadyExistsException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + 
oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class scheduled_query_maintenance_resultTupleSchemeFactory implements SchemeFactory { + public scheduled_query_maintenance_resultTupleScheme getScheme() { + return new scheduled_query_maintenance_resultTupleScheme(); + } + } + + private static class scheduled_query_maintenance_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + if (struct.isSetO4()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new NoSuchObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if 
(incoming.get(2)) { + struct.o3 = new AlreadyExistsException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(3)) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class scheduled_query_progress_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scheduled_query_progress_args"); + + private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new scheduled_query_progress_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new scheduled_query_progress_argsTupleSchemeFactory()); + } + + private ScheduledQueryProgressInfo info; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + INFO((short)1, "info"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // INFO + return INFO; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQueryProgressInfo.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scheduled_query_progress_args.class, metaDataMap); + } + + public scheduled_query_progress_args() { + } + + public scheduled_query_progress_args( + ScheduledQueryProgressInfo info) + { + this(); + this.info = info; + } + + /** + * Performs a deep copy on other. 
+ */ + public scheduled_query_progress_args(scheduled_query_progress_args other) { + if (other.isSetInfo()) { + this.info = new ScheduledQueryProgressInfo(other.info); + } + } + + public scheduled_query_progress_args deepCopy() { + return new scheduled_query_progress_args(this); + } + + @Override + public void clear() { + this.info = null; + } + + public ScheduledQueryProgressInfo getInfo() { + return this.info; + } + + public void setInfo(ScheduledQueryProgressInfo info) { + this.info = info; + } + + public void unsetInfo() { + this.info = null; + } + + /** Returns true if field info is set (has been assigned a value) and false otherwise */ + public boolean isSetInfo() { + return this.info != null; + } + + public void setInfoIsSet(boolean value) { + if (!value) { + this.info = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case INFO: + if (value == null) { + unsetInfo(); + } else { + setInfo((ScheduledQueryProgressInfo)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case INFO: + return getInfo(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case INFO: + return isSetInfo(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof scheduled_query_progress_args) + return this.equals((scheduled_query_progress_args)that); + return false; + } + + public boolean equals(scheduled_query_progress_args that) { + if (that == null) + return false; + + boolean this_present_info = true && this.isSetInfo(); + boolean that_present_info = true && that.isSetInfo(); + if (this_present_info || that_present_info) { + if (!(this_present_info && 
that_present_info)) + return false; + if (!this.info.equals(that.info)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_info = true && (isSetInfo()); + list.add(present_info); + if (present_info) + list.add(info); + + return list.hashCode(); + } + + @Override + public int compareTo(scheduled_query_progress_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetInfo()).compareTo(other.isSetInfo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetInfo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.info, other.info); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("scheduled_query_progress_args("); + boolean first = true; + + sb.append("info:"); + if (this.info == null) { + sb.append("null"); + } else { + sb.append(this.info); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (info != null) { + info.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class scheduled_query_progress_argsStandardSchemeFactory implements SchemeFactory { + public scheduled_query_progress_argsStandardScheme getScheme() { + return new scheduled_query_progress_argsStandardScheme(); + } + } + + private static class scheduled_query_progress_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // INFO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.info = new ScheduledQueryProgressInfo(); + struct.info.read(iprot); + struct.setInfoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.info != null) { + oprot.writeFieldBegin(INFO_FIELD_DESC); + struct.info.write(oprot); + oprot.writeFieldEnd(); + } + 
oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class scheduled_query_progress_argsTupleSchemeFactory implements SchemeFactory { + public scheduled_query_progress_argsTupleScheme getScheme() { + return new scheduled_query_progress_argsTupleScheme(); + } + } + + private static class scheduled_query_progress_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetInfo()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetInfo()) { + struct.info.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.info = new ScheduledQueryProgressInfo(); + struct.info.read(iprot); + struct.setInfoIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class scheduled_query_progress_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scheduled_query_progress_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + 
schemes.put(StandardScheme.class, new scheduled_query_progress_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new scheduled_query_progress_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + private InvalidOperationException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scheduled_query_progress_result.class, metaDataMap); + } + + public scheduled_query_progress_result() { + } + + public scheduled_query_progress_result( + MetaException o1, + InvalidOperationException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public scheduled_query_progress_result(scheduled_query_progress_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + } + + public scheduled_query_progress_result deepCopy() { + return new scheduled_query_progress_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidOperationException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case 
O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof scheduled_query_progress_result) + return this.equals((scheduled_query_progress_result)that); + return false; + } + + public boolean equals(scheduled_query_progress_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(scheduled_query_progress_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("scheduled_query_progress_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class scheduled_query_progress_resultStandardSchemeFactory implements SchemeFactory { + public scheduled_query_progress_resultStandardScheme getScheme() { + return new 
scheduled_query_progress_resultStandardScheme(); + } + } + + private static class scheduled_query_progress_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class scheduled_query_progress_resultTupleSchemeFactory implements SchemeFactory { + public scheduled_query_progress_resultTupleScheme getScheme() { + return new scheduled_query_progress_resultTupleScheme(); + 
} + } + + private static class scheduled_query_progress_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_scheduled_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_scheduled_query_args"); + + private static final org.apache.thrift.protocol.TField SCHEDULE_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduleKey", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_scheduled_query_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_scheduled_query_argsTupleSchemeFactory()); + } + + private ScheduledQueryKey scheduleKey; // required + + 
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SCHEDULE_KEY((short)1, "scheduleKey"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // SCHEDULE_KEY + return SCHEDULE_KEY; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SCHEDULE_KEY, new org.apache.thrift.meta_data.FieldMetaData("scheduleKey", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQueryKey.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_scheduled_query_args.class, metaDataMap); + } + + public get_scheduled_query_args() { + } + + public get_scheduled_query_args( + ScheduledQueryKey scheduleKey) + { + this(); + this.scheduleKey = scheduleKey; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_scheduled_query_args(get_scheduled_query_args other) { + if (other.isSetScheduleKey()) { + this.scheduleKey = new ScheduledQueryKey(other.scheduleKey); + } + } + + public get_scheduled_query_args deepCopy() { + return new get_scheduled_query_args(this); + } + + @Override + public void clear() { + this.scheduleKey = null; + } + + public ScheduledQueryKey getScheduleKey() { + return this.scheduleKey; + } + + public void setScheduleKey(ScheduledQueryKey scheduleKey) { + this.scheduleKey = scheduleKey; + } + + public void unsetScheduleKey() { + this.scheduleKey = null; + } + + /** Returns true if field scheduleKey is set (has been assigned a value) and false otherwise */ + public boolean isSetScheduleKey() { + return this.scheduleKey != null; + } + + public void setScheduleKeyIsSet(boolean value) { + if (!value) { + this.scheduleKey = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SCHEDULE_KEY: + if (value == null) { + unsetScheduleKey(); + } else { + setScheduleKey((ScheduledQueryKey)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SCHEDULE_KEY: + return getScheduleKey(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SCHEDULE_KEY: + return isSetScheduleKey(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_scheduled_query_args) + return this.equals((get_scheduled_query_args)that); + return false; + } + + public boolean equals(get_scheduled_query_args that) { + if (that == null) + return false; + + boolean this_present_scheduleKey = true && this.isSetScheduleKey(); + boolean 
that_present_scheduleKey = true && that.isSetScheduleKey(); + if (this_present_scheduleKey || that_present_scheduleKey) { + if (!(this_present_scheduleKey && that_present_scheduleKey)) + return false; + if (!this.scheduleKey.equals(that.scheduleKey)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_scheduleKey = true && (isSetScheduleKey()); + list.add(present_scheduleKey); + if (present_scheduleKey) + list.add(scheduleKey); + + return list.hashCode(); + } + + @Override + public int compareTo(get_scheduled_query_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetScheduleKey()).compareTo(other.isSetScheduleKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetScheduleKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduleKey, other.scheduleKey); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_scheduled_query_args("); + boolean first = true; + + sb.append("scheduleKey:"); + if (this.scheduleKey == null) { + sb.append("null"); + } else { + sb.append(this.scheduleKey); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for 
sub-struct validity + if (scheduleKey != null) { + scheduleKey.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_scheduled_query_argsStandardSchemeFactory implements SchemeFactory { + public get_scheduled_query_argsStandardScheme getScheme() { + return new get_scheduled_query_argsStandardScheme(); + } + } + + private static class get_scheduled_query_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // SCHEDULE_KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.scheduleKey = new ScheduledQueryKey(); + struct.scheduleKey.read(iprot); + struct.setScheduleKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_query_args struct) throws 
org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.scheduleKey != null) { + oprot.writeFieldBegin(SCHEDULE_KEY_FIELD_DESC); + struct.scheduleKey.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_scheduled_query_argsTupleSchemeFactory implements SchemeFactory { + public get_scheduled_query_argsTupleScheme getScheme() { + return new get_scheduled_query_argsTupleScheme(); + } + } + + private static class get_scheduled_query_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetScheduleKey()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetScheduleKey()) { + struct.scheduleKey.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.scheduleKey = new ScheduledQueryKey(); + struct.scheduleKey.read(iprot); + struct.setScheduleKeyIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_scheduled_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_scheduled_query_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final 
org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_scheduled_query_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_scheduled_query_resultTupleSchemeFactory()); + } + + private ScheduledQuery success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQuery.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_scheduled_query_result.class, metaDataMap); + } + + public get_scheduled_query_result() { + } + + public get_scheduled_query_result( + ScheduledQuery success, + MetaException o1, + NoSuchObjectException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_scheduled_query_result(get_scheduled_query_result other) { + if (other.isSetSuccess()) { + this.success = new ScheduledQuery(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchObjectException(other.o2); + } + } + + public get_scheduled_query_result deepCopy() { + return new get_scheduled_query_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public ScheduledQuery getSuccess() { + return this.success; + } + + public void setSuccess(ScheduledQuery success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { + return this.o2; + } + + public void setO2(NoSuchObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + 
setSuccess((ScheduledQuery)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchObjectException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_scheduled_query_result) + return this.equals((get_scheduled_query_result)that); + return false; + } + + public boolean equals(get_scheduled_query_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if 
(!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_scheduled_query_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_scheduled_query_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -270069,22 +272160,6 @@ public String toString() { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; - if (!first) sb.append(", "); - sb.append("o4:"); - if (this.o4 == null) { - sb.append("null"); - } else { - sb.append(this.o4); - } - first = false; sb.append(")"); return sb.toString(); } @@ -270092,6 +272167,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -270110,15 +272188,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class scheduled_query_maintenance_resultStandardSchemeFactory implements SchemeFactory { - public scheduled_query_maintenance_resultStandardScheme getScheme() { - return new scheduled_query_maintenance_resultStandardScheme(); + private static class get_scheduled_query_resultStandardSchemeFactory implements SchemeFactory { + public get_scheduled_query_resultStandardScheme getScheme() { + return new get_scheduled_query_resultStandardScheme(); } } - private static class scheduled_query_maintenance_resultStandardScheme extends StandardScheme { + private static class get_scheduled_query_resultStandardScheme extends StandardScheme { - public void 
read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -270128,6 +272206,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_mai break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new ScheduledQuery(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new MetaException(); @@ -270146,24 +272233,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_mai org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // O3 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new AlreadyExistsException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // O4 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o4 = new InvalidInputException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -270173,10 +272242,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_mai struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_maintenance_result struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_query_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -270187,106 +272261,85 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_ma struct.o2.write(oprot); oprot.writeFieldEnd(); } - if (struct.o3 != null) { - oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o4 != null) { - oprot.writeFieldBegin(O4_FIELD_DESC); - struct.o4.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class scheduled_query_maintenance_resultTupleSchemeFactory implements SchemeFactory { - public scheduled_query_maintenance_resultTupleScheme getScheme() { - return new scheduled_query_maintenance_resultTupleScheme(); + private static class get_scheduled_query_resultTupleSchemeFactory implements SchemeFactory { + public get_scheduled_query_resultTupleScheme getScheme() { + return new get_scheduled_query_resultTupleScheme(); } } - private static class scheduled_query_maintenance_resultTupleScheme extends TupleScheme { + private static class get_scheduled_query_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + 
if (struct.isSetO1()) { optionals.set(1); } - if (struct.isSetO3()) { + if (struct.isSetO2()) { optionals.set(2); } - if (struct.isSetO4()) { - optionals.set(3); + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); } - oprot.writeBitSet(optionals, 4); if (struct.isSetO1()) { struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); - } - if (struct.isSetO4()) { - struct.o4.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_maintenance_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.success = new ScheduledQuery(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(2)) { - struct.o3 = new AlreadyExistsException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } - if (incoming.get(3)) { - struct.o4 = new InvalidInputException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class scheduled_query_progress_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scheduled_query_progress_args"); + 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_replication_metrics_args"); - private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REPLICATION_METRIC_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("replicationMetricList", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new scheduled_query_progress_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new scheduled_query_progress_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_replication_metrics_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_replication_metrics_argsTupleSchemeFactory()); } - private ScheduledQueryProgressInfo info; // required + private ReplicationMetricList replicationMetricList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - INFO((short)1, "info"); + REPLICATION_METRIC_LIST((short)1, "replicationMetricList"); private static final Map byName = new HashMap(); @@ -270301,8 +272354,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_main */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // INFO - return INFO; + case 1: // REPLICATION_METRIC_LIST + return REPLICATION_METRIC_LIST; default: return null; } @@ -270346,70 +272399,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQueryProgressInfo.class))); + tmpMap.put(_Fields.REPLICATION_METRIC_LIST, new org.apache.thrift.meta_data.FieldMetaData("replicationMetricList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplicationMetricList.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scheduled_query_progress_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_replication_metrics_args.class, metaDataMap); } - public scheduled_query_progress_args() { + public add_replication_metrics_args() { } - public scheduled_query_progress_args( - ScheduledQueryProgressInfo info) + public add_replication_metrics_args( + ReplicationMetricList replicationMetricList) { this(); - this.info = info; + this.replicationMetricList = replicationMetricList; } /** * Performs a deep 
copy on other. */ - public scheduled_query_progress_args(scheduled_query_progress_args other) { - if (other.isSetInfo()) { - this.info = new ScheduledQueryProgressInfo(other.info); + public add_replication_metrics_args(add_replication_metrics_args other) { + if (other.isSetReplicationMetricList()) { + this.replicationMetricList = new ReplicationMetricList(other.replicationMetricList); } } - public scheduled_query_progress_args deepCopy() { - return new scheduled_query_progress_args(this); + public add_replication_metrics_args deepCopy() { + return new add_replication_metrics_args(this); } @Override public void clear() { - this.info = null; + this.replicationMetricList = null; } - public ScheduledQueryProgressInfo getInfo() { - return this.info; + public ReplicationMetricList getReplicationMetricList() { + return this.replicationMetricList; } - public void setInfo(ScheduledQueryProgressInfo info) { - this.info = info; + public void setReplicationMetricList(ReplicationMetricList replicationMetricList) { + this.replicationMetricList = replicationMetricList; } - public void unsetInfo() { - this.info = null; + public void unsetReplicationMetricList() { + this.replicationMetricList = null; } - /** Returns true if field info is set (has been assigned a value) and false otherwise */ - public boolean isSetInfo() { - return this.info != null; + /** Returns true if field replicationMetricList is set (has been assigned a value) and false otherwise */ + public boolean isSetReplicationMetricList() { + return this.replicationMetricList != null; } - public void setInfoIsSet(boolean value) { + public void setReplicationMetricListIsSet(boolean value) { if (!value) { - this.info = null; + this.replicationMetricList = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case INFO: + case REPLICATION_METRIC_LIST: if (value == null) { - unsetInfo(); + unsetReplicationMetricList(); } else { - setInfo((ScheduledQueryProgressInfo)value); + 
setReplicationMetricList((ReplicationMetricList)value); } break; @@ -270418,8 +272471,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case INFO: - return getInfo(); + case REPLICATION_METRIC_LIST: + return getReplicationMetricList(); } throw new IllegalStateException(); @@ -270432,8 +272485,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case INFO: - return isSetInfo(); + case REPLICATION_METRIC_LIST: + return isSetReplicationMetricList(); } throw new IllegalStateException(); } @@ -270442,21 +272495,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof scheduled_query_progress_args) - return this.equals((scheduled_query_progress_args)that); + if (that instanceof add_replication_metrics_args) + return this.equals((add_replication_metrics_args)that); return false; } - public boolean equals(scheduled_query_progress_args that) { + public boolean equals(add_replication_metrics_args that) { if (that == null) return false; - boolean this_present_info = true && this.isSetInfo(); - boolean that_present_info = true && that.isSetInfo(); - if (this_present_info || that_present_info) { - if (!(this_present_info && that_present_info)) + boolean this_present_replicationMetricList = true && this.isSetReplicationMetricList(); + boolean that_present_replicationMetricList = true && that.isSetReplicationMetricList(); + if (this_present_replicationMetricList || that_present_replicationMetricList) { + if (!(this_present_replicationMetricList && that_present_replicationMetricList)) return false; - if (!this.info.equals(that.info)) + if (!this.replicationMetricList.equals(that.replicationMetricList)) return false; } @@ -270467,28 +272520,28 @@ public boolean equals(scheduled_query_progress_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_info = true && (isSetInfo()); - 
list.add(present_info); - if (present_info) - list.add(info); + boolean present_replicationMetricList = true && (isSetReplicationMetricList()); + list.add(present_replicationMetricList); + if (present_replicationMetricList) + list.add(replicationMetricList); return list.hashCode(); } @Override - public int compareTo(scheduled_query_progress_args other) { + public int compareTo(add_replication_metrics_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetInfo()).compareTo(other.isSetInfo()); + lastComparison = Boolean.valueOf(isSetReplicationMetricList()).compareTo(other.isSetReplicationMetricList()); if (lastComparison != 0) { return lastComparison; } - if (isSetInfo()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.info, other.info); + if (isSetReplicationMetricList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replicationMetricList, other.replicationMetricList); if (lastComparison != 0) { return lastComparison; } @@ -270510,14 +272563,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("scheduled_query_progress_args("); + StringBuilder sb = new StringBuilder("add_replication_metrics_args("); boolean first = true; - sb.append("info:"); - if (this.info == null) { + sb.append("replicationMetricList:"); + if (this.replicationMetricList == null) { sb.append("null"); } else { - sb.append(this.info); + sb.append(this.replicationMetricList); } first = false; sb.append(")"); @@ -270527,8 +272580,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (info != null) { - info.validate(); + if (replicationMetricList != null) { + replicationMetricList.validate(); } } @@ -270548,15 +272601,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class scheduled_query_progress_argsStandardSchemeFactory implements SchemeFactory { - public scheduled_query_progress_argsStandardScheme getScheme() { - return new scheduled_query_progress_argsStandardScheme(); + private static class add_replication_metrics_argsStandardSchemeFactory implements SchemeFactory { + public add_replication_metrics_argsStandardScheme getScheme() { + return new add_replication_metrics_argsStandardScheme(); } } - private static class scheduled_query_progress_argsStandardScheme extends StandardScheme { + private static class add_replication_metrics_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_replication_metrics_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -270566,11 +272619,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_pro break; } switch 
(schemeField.id) { - case 1: // INFO + case 1: // REPLICATION_METRIC_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.info = new ScheduledQueryProgressInfo(); - struct.info.read(iprot); - struct.setInfoIsSet(true); + struct.replicationMetricList = new ReplicationMetricList(); + struct.replicationMetricList.read(iprot); + struct.setReplicationMetricListIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -270584,13 +272637,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_pro struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_replication_metrics_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.info != null) { - oprot.writeFieldBegin(INFO_FIELD_DESC); - struct.info.write(oprot); + if (struct.replicationMetricList != null) { + oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC); + struct.replicationMetricList.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -270599,60 +272652,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_pr } - private static class scheduled_query_progress_argsTupleSchemeFactory implements SchemeFactory { - public scheduled_query_progress_argsTupleScheme getScheme() { - return new scheduled_query_progress_argsTupleScheme(); + private static class add_replication_metrics_argsTupleSchemeFactory implements SchemeFactory { + public add_replication_metrics_argsTupleScheme getScheme() { + return new add_replication_metrics_argsTupleScheme(); } } - private static class scheduled_query_progress_argsTupleScheme extends TupleScheme { + private static class add_replication_metrics_argsTupleScheme extends TupleScheme { @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetInfo()) { + if (struct.isSetReplicationMetricList()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetInfo()) { - struct.info.write(oprot); + if (struct.isSetReplicationMetricList()) { + struct.replicationMetricList.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.info = new ScheduledQueryProgressInfo(); - struct.info.read(iprot); - struct.setInfoIsSet(true); + struct.replicationMetricList = new ReplicationMetricList(); + struct.replicationMetricList.read(iprot); + struct.setReplicationMetricListIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class scheduled_query_progress_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scheduled_query_progress_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_replication_metrics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_replication_metrics_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new scheduled_query_progress_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new scheduled_query_progress_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_replication_metrics_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_replication_metrics_resultTupleSchemeFactory()); } private MetaException o1; // required - private InvalidOperationException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"), - O2((short)2, "o2"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -270669,8 +272719,6 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; - case 2: // O2 - return O2; default: return null; } @@ -270716,44 +272764,36 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scheduled_query_progress_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_replication_metrics_result.class, metaDataMap); } - public scheduled_query_progress_result() { + public add_replication_metrics_result() { } - public scheduled_query_progress_result( - MetaException o1, - InvalidOperationException o2) + public add_replication_metrics_result( + MetaException o1) { this(); this.o1 = o1; - this.o2 = o2; } /** * Performs a deep copy on other. 
*/ - public scheduled_query_progress_result(scheduled_query_progress_result other) { + public add_replication_metrics_result(add_replication_metrics_result other) { if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } - if (other.isSetO2()) { - this.o2 = new InvalidOperationException(other.o2); - } } - public scheduled_query_progress_result deepCopy() { - return new scheduled_query_progress_result(this); + public add_replication_metrics_result deepCopy() { + return new add_replication_metrics_result(this); } @Override public void clear() { this.o1 = null; - this.o2 = null; } public MetaException getO1() { @@ -270779,29 +272819,6 @@ public void setO1IsSet(boolean value) { } } - public InvalidOperationException getO2() { - return this.o2; - } - - public void setO2(InvalidOperationException o2) { - this.o2 = o2; - } - - public void unsetO2() { - this.o2 = null; - } - - /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ - public boolean isSetO2() { - return this.o2 != null; - } - - public void setO2IsSet(boolean value) { - if (!value) { - this.o2 = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: @@ -270812,14 +272829,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case O2: - if (value == null) { - unsetO2(); - } else { - setO2((InvalidOperationException)value); - } - break; - } } @@ -270828,9 +272837,6 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); - case O2: - return getO2(); - } throw new IllegalStateException(); } @@ -270844,8 +272850,6 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); - case O2: - return isSetO2(); } throw new IllegalStateException(); } @@ -270854,12 +272858,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof scheduled_query_progress_result) - return 
this.equals((scheduled_query_progress_result)that); + if (that instanceof add_replication_metrics_result) + return this.equals((add_replication_metrics_result)that); return false; } - public boolean equals(scheduled_query_progress_result that) { + public boolean equals(add_replication_metrics_result that) { if (that == null) return false; @@ -270872,15 +272876,6 @@ public boolean equals(scheduled_query_progress_result that) { return false; } - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) - return false; - if (!this.o2.equals(that.o2)) - return false; - } - return true; } @@ -270893,16 +272888,11 @@ public int hashCode() { if (present_o1) list.add(o1); - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); - return list.hashCode(); } @Override - public int compareTo(scheduled_query_progress_result other) { + public int compareTo(add_replication_metrics_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -270919,16 +272909,6 @@ public int compareTo(scheduled_query_progress_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO2()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -270946,7 +272926,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("scheduled_query_progress_result("); + StringBuilder sb = new StringBuilder("add_replication_metrics_result("); boolean first = true; sb.append("o1:"); @@ -270956,14 +272936,6 @@ public String toString() { sb.append(this.o1); } first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { - sb.append("null"); - } else { - sb.append(this.o2); - } - first = false; sb.append(")"); return sb.toString(); } @@ -270989,15 +272961,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class scheduled_query_progress_resultStandardSchemeFactory implements SchemeFactory { - public scheduled_query_progress_resultStandardScheme getScheme() { - return new scheduled_query_progress_resultStandardScheme(); + private static class add_replication_metrics_resultStandardSchemeFactory implements SchemeFactory { + public add_replication_metrics_resultStandardScheme getScheme() { + return new add_replication_metrics_resultStandardScheme(); } } - private static class scheduled_query_progress_resultStandardScheme extends StandardScheme { + private static class add_replication_metrics_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_replication_metrics_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -271016,15 +272988,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_pro org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // O2 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new InvalidOperationException(); - struct.o2.read(iprot); - 
struct.setO2IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -271034,7 +272997,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scheduled_query_pro struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_replication_metrics_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -271043,79 +273006,63 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scheduled_query_pr struct.o1.write(oprot); oprot.writeFieldEnd(); } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class scheduled_query_progress_resultTupleSchemeFactory implements SchemeFactory { - public scheduled_query_progress_resultTupleScheme getScheme() { - return new scheduled_query_progress_resultTupleScheme(); + private static class add_replication_metrics_resultTupleSchemeFactory implements SchemeFactory { + public add_replication_metrics_resultTupleScheme getScheme() { + return new add_replication_metrics_resultTupleScheme(); } } - private static class scheduled_query_progress_resultTupleScheme extends TupleScheme { + private static class add_replication_metrics_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new 
BitSet(); if (struct.isSetO1()) { optionals.set(0); } - if (struct.isSetO2()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); + oprot.writeBitSet(optionals, 1); if (struct.isSetO1()) { struct.o1.write(oprot); } - if (struct.isSetO2()) { - struct.o2.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_progress_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_replication_metrics_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { - struct.o2 = new InvalidOperationException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_scheduled_query_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_scheduled_query_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_replication_metrics_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_replication_metrics_args"); - private static final org.apache.thrift.protocol.TField SCHEDULE_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduleKey", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new 
org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_scheduled_query_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_scheduled_query_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_replication_metrics_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_replication_metrics_argsTupleSchemeFactory()); } - private ScheduledQueryKey scheduleKey; // required + private GetReplicationMetricsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SCHEDULE_KEY((short)1, "scheduleKey"); + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -271130,8 +273077,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scheduled_query_prog */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // SCHEDULE_KEY - return SCHEDULE_KEY; + case 1: // RQST + return RQST; default: return null; } @@ -271175,70 +273122,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SCHEDULE_KEY, new org.apache.thrift.meta_data.FieldMetaData("scheduleKey", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQueryKey.class))); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetReplicationMetricsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_scheduled_query_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_replication_metrics_args.class, metaDataMap); } - public get_scheduled_query_args() { + public get_replication_metrics_args() { } - public get_scheduled_query_args( - ScheduledQueryKey scheduleKey) + public get_replication_metrics_args( + GetReplicationMetricsRequest rqst) { this(); - this.scheduleKey = scheduleKey; + this.rqst = rqst; } /** * Performs a deep copy on other. */ - public get_scheduled_query_args(get_scheduled_query_args other) { - if (other.isSetScheduleKey()) { - this.scheduleKey = new ScheduledQueryKey(other.scheduleKey); + public get_replication_metrics_args(get_replication_metrics_args other) { + if (other.isSetRqst()) { + this.rqst = new GetReplicationMetricsRequest(other.rqst); } } - public get_scheduled_query_args deepCopy() { - return new get_scheduled_query_args(this); + public get_replication_metrics_args deepCopy() { + return new get_replication_metrics_args(this); } @Override public void clear() { - this.scheduleKey = null; + this.rqst = null; } - public ScheduledQueryKey getScheduleKey() { - return this.scheduleKey; + public GetReplicationMetricsRequest getRqst() { + return this.rqst; } - public void setScheduleKey(ScheduledQueryKey scheduleKey) { - this.scheduleKey = scheduleKey; + public void setRqst(GetReplicationMetricsRequest rqst) { + this.rqst = rqst; } - public void unsetScheduleKey() { - this.scheduleKey = null; + public void unsetRqst() { + this.rqst = null; } - /** Returns true if field scheduleKey is set (has been assigned a value) and false otherwise */ - public boolean isSetScheduleKey() { - return this.scheduleKey != null; + /** Returns true if field rqst is set (has been 
assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; } - public void setScheduleKeyIsSet(boolean value) { + public void setRqstIsSet(boolean value) { if (!value) { - this.scheduleKey = null; + this.rqst = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SCHEDULE_KEY: + case RQST: if (value == null) { - unsetScheduleKey(); + unsetRqst(); } else { - setScheduleKey((ScheduledQueryKey)value); + setRqst((GetReplicationMetricsRequest)value); } break; @@ -271247,8 +273194,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SCHEDULE_KEY: - return getScheduleKey(); + case RQST: + return getRqst(); } throw new IllegalStateException(); @@ -271261,8 +273208,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case SCHEDULE_KEY: - return isSetScheduleKey(); + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -271271,21 +273218,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_scheduled_query_args) - return this.equals((get_scheduled_query_args)that); + if (that instanceof get_replication_metrics_args) + return this.equals((get_replication_metrics_args)that); return false; } - public boolean equals(get_scheduled_query_args that) { + public boolean equals(get_replication_metrics_args that) { if (that == null) return false; - boolean this_present_scheduleKey = true && this.isSetScheduleKey(); - boolean that_present_scheduleKey = true && that.isSetScheduleKey(); - if (this_present_scheduleKey || that_present_scheduleKey) { - if (!(this_present_scheduleKey && that_present_scheduleKey)) + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) 
return false; - if (!this.scheduleKey.equals(that.scheduleKey)) + if (!this.rqst.equals(that.rqst)) return false; } @@ -271296,28 +273243,28 @@ public boolean equals(get_scheduled_query_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_scheduleKey = true && (isSetScheduleKey()); - list.add(present_scheduleKey); - if (present_scheduleKey) - list.add(scheduleKey); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); return list.hashCode(); } @Override - public int compareTo(get_scheduled_query_args other) { + public int compareTo(get_replication_metrics_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetScheduleKey()).compareTo(other.isSetScheduleKey()); + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); if (lastComparison != 0) { return lastComparison; } - if (isSetScheduleKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduleKey, other.scheduleKey); + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); if (lastComparison != 0) { return lastComparison; } @@ -271339,14 +273286,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_scheduled_query_args("); + StringBuilder sb = new StringBuilder("get_replication_metrics_args("); boolean first = true; - sb.append("scheduleKey:"); - if (this.scheduleKey == null) { + sb.append("rqst:"); + if (this.rqst == null) { sb.append("null"); } else { - sb.append(this.scheduleKey); + sb.append(this.rqst); } first = false; sb.append(")"); @@ -271356,8 +273303,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (scheduleKey != null) { - scheduleKey.validate(); + if (rqst != null) { + rqst.validate(); } } @@ -271377,15 +273324,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_scheduled_query_argsStandardSchemeFactory implements SchemeFactory { - public get_scheduled_query_argsStandardScheme getScheme() { - return new get_scheduled_query_argsStandardScheme(); + private static class get_replication_metrics_argsStandardSchemeFactory implements SchemeFactory { + public get_replication_metrics_argsStandardScheme getScheme() { + return new get_replication_metrics_argsStandardScheme(); } } - private static class get_scheduled_query_argsStandardScheme extends StandardScheme { + private static class get_replication_metrics_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_replication_metrics_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -271395,11 +273342,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query break; } switch (schemeField.id) { - case 1: // SCHEDULE_KEY + case 1: // RQST if 
(schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.scheduleKey = new ScheduledQueryKey(); - struct.scheduleKey.read(iprot); - struct.setScheduleKeyIsSet(true); + struct.rqst = new GetReplicationMetricsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -271413,13 +273360,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_replication_metrics_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.scheduleKey != null) { - oprot.writeFieldBegin(SCHEDULE_KEY_FIELD_DESC); - struct.scheduleKey.write(oprot); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -271428,63 +273375,60 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_quer } - private static class get_scheduled_query_argsTupleSchemeFactory implements SchemeFactory { - public get_scheduled_query_argsTupleScheme getScheme() { - return new get_scheduled_query_argsTupleScheme(); + private static class get_replication_metrics_argsTupleSchemeFactory implements SchemeFactory { + public get_replication_metrics_argsTupleScheme getScheme() { + return new get_replication_metrics_argsTupleScheme(); } } - private static class get_scheduled_query_argsTupleScheme extends TupleScheme { + private static class get_replication_metrics_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol prot, get_replication_metrics_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetScheduleKey()) { + if (struct.isSetRqst()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetScheduleKey()) { - struct.scheduleKey.write(oprot); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_replication_metrics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.scheduleKey = new ScheduledQueryKey(); - struct.scheduleKey.read(iprot); - struct.setScheduleKeyIsSet(true); + struct.rqst = new GetReplicationMetricsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_scheduled_query_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_scheduled_query_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_replication_metrics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_replication_metrics_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_scheduled_query_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_scheduled_query_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_replication_metrics_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_replication_metrics_resultTupleSchemeFactory()); } - private ScheduledQuery success; // required + private ReplicationMetricList success; // required private MetaException o1; // required - private NoSuchObjectException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O1((short)1, "o1"), - O2((short)2, "o2"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -271503,8 +273447,6 @@ public static _Fields findByThriftId(int fieldId) { return SUCCESS; case 1: // O1 return O1; - case 2: // O2 - return O2; default: return null; } @@ -271549,60 +273491,52 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ScheduledQuery.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplicationMetricList.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_scheduled_query_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_replication_metrics_result.class, metaDataMap); } - public get_scheduled_query_result() { + public get_replication_metrics_result() { } - public get_scheduled_query_result( - ScheduledQuery success, - MetaException o1, - NoSuchObjectException o2) + public get_replication_metrics_result( + ReplicationMetricList success, + MetaException o1) { this(); 
this.success = success; this.o1 = o1; - this.o2 = o2; } /** * Performs a deep copy on other. */ - public get_scheduled_query_result(get_scheduled_query_result other) { + public get_replication_metrics_result(get_replication_metrics_result other) { if (other.isSetSuccess()) { - this.success = new ScheduledQuery(other.success); + this.success = new ReplicationMetricList(other.success); } if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } - if (other.isSetO2()) { - this.o2 = new NoSuchObjectException(other.o2); - } } - public get_scheduled_query_result deepCopy() { - return new get_scheduled_query_result(this); + public get_replication_metrics_result deepCopy() { + return new get_replication_metrics_result(this); } @Override public void clear() { this.success = null; this.o1 = null; - this.o2 = null; } - public ScheduledQuery getSuccess() { + public ReplicationMetricList getSuccess() { return this.success; } - public void setSuccess(ScheduledQuery success) { + public void setSuccess(ReplicationMetricList success) { this.success = success; } @@ -271644,36 +273578,13 @@ public void setO1IsSet(boolean value) { } } - public NoSuchObjectException getO2() { - return this.o2; - } - - public void setO2(NoSuchObjectException o2) { - this.o2 = o2; - } - - public void unsetO2() { - this.o2 = null; - } - - /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ - public boolean isSetO2() { - return this.o2 != null; - } - - public void setO2IsSet(boolean value) { - if (!value) { - this.o2 = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((ScheduledQuery)value); + setSuccess((ReplicationMetricList)value); } break; @@ -271685,14 +273596,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case O2: - if (value == null) { - unsetO2(); - } else { - setO2((NoSuchObjectException)value); - } - break; - } } @@ 
-271704,9 +273607,6 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); - case O2: - return getO2(); - } throw new IllegalStateException(); } @@ -271722,8 +273622,6 @@ public boolean isSet(_Fields field) { return isSetSuccess(); case O1: return isSetO1(); - case O2: - return isSetO2(); } throw new IllegalStateException(); } @@ -271732,12 +273630,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_scheduled_query_result) - return this.equals((get_scheduled_query_result)that); + if (that instanceof get_replication_metrics_result) + return this.equals((get_replication_metrics_result)that); return false; } - public boolean equals(get_scheduled_query_result that) { + public boolean equals(get_replication_metrics_result that) { if (that == null) return false; @@ -271759,15 +273657,6 @@ public boolean equals(get_scheduled_query_result that) { return false; } - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) - return false; - if (!this.o2.equals(that.o2)) - return false; - } - return true; } @@ -271785,16 +273674,11 @@ public int hashCode() { if (present_o1) list.add(o1); - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); - return list.hashCode(); } @Override - public int compareTo(get_scheduled_query_result other) { + public int compareTo(get_replication_metrics_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -271821,16 +273705,6 @@ public int compareTo(get_scheduled_query_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO2()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -271848,7 +273722,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_scheduled_query_result("); + StringBuilder sb = new StringBuilder("get_replication_metrics_result("); boolean first = true; sb.append("success:"); @@ -271866,14 +273740,6 @@ public String toString() { sb.append(this.o1); } first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { - sb.append("null"); - } else { - sb.append(this.o2); - } - first = false; sb.append(")"); return sb.toString(); } @@ -271902,15 +273768,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_scheduled_query_resultStandardSchemeFactory implements SchemeFactory { - public get_scheduled_query_resultStandardScheme getScheme() { - return new get_scheduled_query_resultStandardScheme(); + private static class get_replication_metrics_resultStandardSchemeFactory implements SchemeFactory { + public get_replication_metrics_resultStandardScheme getScheme() { + return new get_replication_metrics_resultStandardScheme(); } } - private static class get_scheduled_query_resultStandardScheme extends StandardScheme { + private static class get_replication_metrics_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_replication_metrics_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -271922,7 +273788,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query switch (schemeField.id) { case 
0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ScheduledQuery(); + struct.success = new ReplicationMetricList(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -271938,15 +273804,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // O2 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new NoSuchObjectException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -271956,7 +273813,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_scheduled_query struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_query_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_replication_metrics_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -271970,27 +273827,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_scheduled_quer struct.o1.write(oprot); oprot.writeFieldEnd(); } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_scheduled_query_resultTupleSchemeFactory implements SchemeFactory { - public get_scheduled_query_resultTupleScheme getScheme() { - return new get_scheduled_query_resultTupleScheme(); + private static class get_replication_metrics_resultTupleSchemeFactory implements SchemeFactory { + public get_replication_metrics_resultTupleScheme getScheme() { + return new 
get_replication_metrics_resultTupleScheme(); } } - private static class get_scheduled_query_resultTupleScheme extends TupleScheme { + private static class get_replication_metrics_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_replication_metrics_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -271999,27 +273851,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query if (struct.isSetO1()) { optionals.set(1); } - if (struct.isSetO2()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); } - if (struct.isSetO2()) { - struct.o2.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_replication_metrics_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = new ScheduledQuery(); + struct.success = new ReplicationMetricList(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -272028,11 +273874,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_scheduled_query_ struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(2)) { - struct.o2 = new NoSuchObjectException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } } } diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index a20cb23b87..e59a205b00 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1770,6 +1770,17 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\NoSuchObjectException */ public function get_scheduled_query(\metastore\ScheduledQueryKey $scheduleKey); + /** + * @param \metastore\ReplicationMetricList $replicationMetricList + * @throws \metastore\MetaException + */ + public function add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList); + /** + * @param \metastore\GetReplicationMetricsRequest $rqst + * @return \metastore\ReplicationMetricList + * @throws \metastore\MetaException + */ + public function get_replication_metrics(\metastore\GetReplicationMetricsRequest $rqst); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -15297,6 +15308,111 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_scheduled_query failed: unknown result"); } + public function add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList) + { + $this->send_add_replication_metrics($replicationMetricList); + $this->recv_add_replication_metrics(); + } + + public function send_add_replication_metrics(\metastore\ReplicationMetricList $replicationMetricList) + { + $args = new \metastore\ThriftHiveMetastore_add_replication_metrics_args(); + $args->replicationMetricList = $replicationMetricList; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + 
thrift_protocol_write_binary($this->output_, 'add_replication_metrics', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('add_replication_metrics', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_add_replication_metrics() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_replication_metrics_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_add_replication_metrics_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + return; + } + + public function get_replication_metrics(\metastore\GetReplicationMetricsRequest $rqst) + { + $this->send_get_replication_metrics($rqst); + return $this->recv_get_replication_metrics(); + } + + public function send_get_replication_metrics(\metastore\GetReplicationMetricsRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_get_replication_metrics_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_replication_metrics', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_replication_metrics', 
TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_replication_metrics() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_replication_metrics_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_replication_metrics_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new \Exception("get_replication_metrics failed: unknown result"); + } + } // HELPER FUNCTIONS AND STRUCTURES @@ -17626,182 +17742,6 @@ class ThriftHiveMetastore_get_databases_result { return 'ThriftHiveMetastore_get_databases_result'; } - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::LST) { - $this->success = array(); - $_size1127 = 0; - $_etype1130 = 0; - $xfer += $input->readListBegin($_etype1130, $_size1127); - for ($_i1131 = 0; $_i1131 < $_size1127; ++$_i1131) - { - $elem1132 = null; - $xfer += $input->readString($elem1132); - $this->success []= $elem1132; - } - $xfer += $input->readListEnd(); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if 
($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_result'); - if ($this->success !== null) { - if (!is_array($this->success)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('success', TType::LST, 0); - { - $output->writeListBegin(TType::STRING, count($this->success)); - { - foreach ($this->success as $iter1133) - { - $xfer += $output->writeString($iter1133); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_all_databases_args { - static $_TSPEC; - - - public function __construct() { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - ); - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_all_databases_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_get_all_databases_args'); - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_all_databases_result { - static $_TSPEC; - - /** - * @var string[] - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::LST, - 'etype' => TType::STRING, - 'elem' => array( - 'type' => TType::STRING, - ), - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_all_databases_result'; - } - public function read($input) { $xfer = 0; @@ -17854,7 +17794,7 @@ class ThriftHiveMetastore_get_all_databases_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_result'); if ($this->success !== null) { if (!is_array($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -17884,6 +17824,182 @@ class ThriftHiveMetastore_get_all_databases_result { } +class ThriftHiveMetastore_get_all_databases_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_databases_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += 
$input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_all_databases_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_all_databases_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size1141 = 0; + $_etype1144 = 0; + $xfer += $input->readListBegin($_etype1144, $_size1141); + for ($_i1145 = 0; $_i1145 < $_size1141; ++$_i1145) + { + $elem1146 = null; + $xfer += $input->readString($elem1146); + $this->success []= $elem1146; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; 
+ case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter1147) + { + $xfer += $output->writeString($iter1147); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_alter_database_args { static $_TSPEC; @@ -18866,18 +18982,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1141 = 0; - $_ktype1142 = 0; - $_vtype1143 = 0; - $xfer += $input->readMapBegin($_ktype1142, $_vtype1143, $_size1141); - for ($_i1145 = 0; $_i1145 < $_size1141; ++$_i1145) + $_size1148 = 0; + $_ktype1149 = 0; + $_vtype1150 = 0; + $xfer += $input->readMapBegin($_ktype1149, $_vtype1150, $_size1148); + for ($_i1152 = 0; $_i1152 < $_size1148; ++$_i1152) { - $key1146 = ''; - $val1147 = new \metastore\Type(); - $xfer += $input->readString($key1146); - $val1147 = new \metastore\Type(); - $xfer += $val1147->read($input); - $this->success[$key1146] = $val1147; + $key1153 = ''; + 
$val1154 = new \metastore\Type(); + $xfer += $input->readString($key1153); + $val1154 = new \metastore\Type(); + $xfer += $val1154->read($input); + $this->success[$key1153] = $val1154; } $xfer += $input->readMapEnd(); } else { @@ -18913,10 +19029,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter1148 => $viter1149) + foreach ($this->success as $kiter1155 => $viter1156) { - $xfer += $output->writeString($kiter1148); - $xfer += $viter1149->write($output); + $xfer += $output->writeString($kiter1155); + $xfer += $viter1156->write($output); } } $output->writeMapEnd(); @@ -19120,15 +19236,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $elem1155 = new \metastore\FieldSchema(); - $xfer += $elem1155->read($input); - $this->success []= $elem1155; + $elem1162 = null; + $elem1162 = new \metastore\FieldSchema(); + $xfer += $elem1162->read($input); + $this->success []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -19180,9 +19296,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1156) + foreach ($this->success as $iter1163) { - $xfer += $iter1156->write($output); + $xfer += $iter1163->write($output); } } $output->writeListEnd(); @@ -19424,15 +19540,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, 
$_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $elem1162 = new \metastore\FieldSchema(); - $xfer += $elem1162->read($input); - $this->success []= $elem1162; + $elem1169 = null; + $elem1169 = new \metastore\FieldSchema(); + $xfer += $elem1169->read($input); + $this->success []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -19484,9 +19600,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1163) + foreach ($this->success as $iter1170) { - $xfer += $iter1163->write($output); + $xfer += $iter1170->write($output); } } $output->writeListEnd(); @@ -19935,15 +20051,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $elem1169 = new \metastore\FieldSchema(); - $xfer += $elem1169->read($input); - $this->success []= $elem1169; + $elem1176 = null; + $elem1176 = new \metastore\FieldSchema(); + $xfer += $elem1176->read($input); + $this->success []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -19995,9 +20111,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1170) + foreach ($this->success as $iter1177) { - $xfer += $iter1170->write($output); + $xfer += $iter1177->write($output); } } $output->writeListEnd(); @@ -20239,15 +20355,15 @@ class 
ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $elem1176 = new \metastore\FieldSchema(); - $xfer += $elem1176->read($input); - $this->success []= $elem1176; + $elem1183 = null; + $elem1183 = new \metastore\FieldSchema(); + $xfer += $elem1183->read($input); + $this->success []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -20299,9 +20415,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1177) + foreach ($this->success as $iter1184) { - $xfer += $iter1177->write($output); + $xfer += $iter1184->write($output); } } $output->writeListEnd(); @@ -21208,15 +21324,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $elem1183 = new \metastore\SQLPrimaryKey(); - $xfer += $elem1183->read($input); - $this->primaryKeys []= $elem1183; + $elem1190 = null; + $elem1190 = new \metastore\SQLPrimaryKey(); + $xfer += $elem1190->read($input); + $this->primaryKeys []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -21226,15 +21342,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { 
$this->foreignKeys = array(); - $_size1184 = 0; - $_etype1187 = 0; - $xfer += $input->readListBegin($_etype1187, $_size1184); - for ($_i1188 = 0; $_i1188 < $_size1184; ++$_i1188) + $_size1191 = 0; + $_etype1194 = 0; + $xfer += $input->readListBegin($_etype1194, $_size1191); + for ($_i1195 = 0; $_i1195 < $_size1191; ++$_i1195) { - $elem1189 = null; - $elem1189 = new \metastore\SQLForeignKey(); - $xfer += $elem1189->read($input); - $this->foreignKeys []= $elem1189; + $elem1196 = null; + $elem1196 = new \metastore\SQLForeignKey(); + $xfer += $elem1196->read($input); + $this->foreignKeys []= $elem1196; } $xfer += $input->readListEnd(); } else { @@ -21244,15 +21360,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size1190 = 0; - $_etype1193 = 0; - $xfer += $input->readListBegin($_etype1193, $_size1190); - for ($_i1194 = 0; $_i1194 < $_size1190; ++$_i1194) + $_size1197 = 0; + $_etype1200 = 0; + $xfer += $input->readListBegin($_etype1200, $_size1197); + for ($_i1201 = 0; $_i1201 < $_size1197; ++$_i1201) { - $elem1195 = null; - $elem1195 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem1195->read($input); - $this->uniqueConstraints []= $elem1195; + $elem1202 = null; + $elem1202 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem1202->read($input); + $this->uniqueConstraints []= $elem1202; } $xfer += $input->readListEnd(); } else { @@ -21262,15 +21378,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size1196 = 0; - $_etype1199 = 0; - $xfer += $input->readListBegin($_etype1199, $_size1196); - for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) + $_size1203 = 0; + $_etype1206 = 0; + $xfer += $input->readListBegin($_etype1206, $_size1203); + for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) { - $elem1201 = null; - $elem1201 = new \metastore\SQLNotNullConstraint(); - $xfer += 
$elem1201->read($input); - $this->notNullConstraints []= $elem1201; + $elem1208 = null; + $elem1208 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem1208->read($input); + $this->notNullConstraints []= $elem1208; } $xfer += $input->readListEnd(); } else { @@ -21280,15 +21396,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size1202 = 0; - $_etype1205 = 0; - $xfer += $input->readListBegin($_etype1205, $_size1202); - for ($_i1206 = 0; $_i1206 < $_size1202; ++$_i1206) + $_size1209 = 0; + $_etype1212 = 0; + $xfer += $input->readListBegin($_etype1212, $_size1209); + for ($_i1213 = 0; $_i1213 < $_size1209; ++$_i1213) { - $elem1207 = null; - $elem1207 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem1207->read($input); - $this->defaultConstraints []= $elem1207; + $elem1214 = null; + $elem1214 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem1214->read($input); + $this->defaultConstraints []= $elem1214; } $xfer += $input->readListEnd(); } else { @@ -21298,15 +21414,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size1208 = 0; - $_etype1211 = 0; - $xfer += $input->readListBegin($_etype1211, $_size1208); - for ($_i1212 = 0; $_i1212 < $_size1208; ++$_i1212) + $_size1215 = 0; + $_etype1218 = 0; + $xfer += $input->readListBegin($_etype1218, $_size1215); + for ($_i1219 = 0; $_i1219 < $_size1215; ++$_i1219) { - $elem1213 = null; - $elem1213 = new \metastore\SQLCheckConstraint(); - $xfer += $elem1213->read($input); - $this->checkConstraints []= $elem1213; + $elem1220 = null; + $elem1220 = new \metastore\SQLCheckConstraint(); + $xfer += $elem1220->read($input); + $this->checkConstraints []= $elem1220; } $xfer += $input->readListEnd(); } else { @@ -21342,9 +21458,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, 
count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter1214) + foreach ($this->primaryKeys as $iter1221) { - $xfer += $iter1214->write($output); + $xfer += $iter1221->write($output); } } $output->writeListEnd(); @@ -21359,9 +21475,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter1215) + foreach ($this->foreignKeys as $iter1222) { - $xfer += $iter1215->write($output); + $xfer += $iter1222->write($output); } } $output->writeListEnd(); @@ -21376,9 +21492,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter1216) + foreach ($this->uniqueConstraints as $iter1223) { - $xfer += $iter1216->write($output); + $xfer += $iter1223->write($output); } } $output->writeListEnd(); @@ -21393,9 +21509,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter1217) + foreach ($this->notNullConstraints as $iter1224) { - $xfer += $iter1217->write($output); + $xfer += $iter1224->write($output); } } $output->writeListEnd(); @@ -21410,9 +21526,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter1218) + foreach ($this->defaultConstraints as $iter1225) { - $xfer += $iter1218->write($output); + $xfer += $iter1225->write($output); } } $output->writeListEnd(); @@ -21427,9 +21543,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter1219) + foreach ($this->checkConstraints as $iter1226) { - $xfer += $iter1219->write($output); + 
$xfer += $iter1226->write($output); } } $output->writeListEnd(); @@ -23661,14 +23777,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size1220 = 0; - $_etype1223 = 0; - $xfer += $input->readListBegin($_etype1223, $_size1220); - for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) + $_size1227 = 0; + $_etype1230 = 0; + $xfer += $input->readListBegin($_etype1230, $_size1227); + for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) { - $elem1225 = null; - $xfer += $input->readString($elem1225); - $this->partNames []= $elem1225; + $elem1232 = null; + $xfer += $input->readString($elem1232); + $this->partNames []= $elem1232; } $xfer += $input->readListEnd(); } else { @@ -23706,9 +23822,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter1226) + foreach ($this->partNames as $iter1233) { - $xfer += $output->writeString($iter1226); + $xfer += $output->writeString($iter1233); } } $output->writeListEnd(); @@ -24144,14 +24260,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1227 = 0; - $_etype1230 = 0; - $xfer += $input->readListBegin($_etype1230, $_size1227); - for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) + $_size1234 = 0; + $_etype1237 = 0; + $xfer += $input->readListBegin($_etype1237, $_size1234); + for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) { - $elem1232 = null; - $xfer += $input->readString($elem1232); - $this->success []= $elem1232; + $elem1239 = null; + $xfer += $input->readString($elem1239); + $this->success []= $elem1239; } $xfer += $input->readListEnd(); } else { @@ -24187,9 +24303,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1233) + foreach ($this->success as $iter1240) { - $xfer += 
$output->writeString($iter1233); + $xfer += $output->writeString($iter1240); } } $output->writeListEnd(); @@ -24391,14 +24507,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1234 = 0; - $_etype1237 = 0; - $xfer += $input->readListBegin($_etype1237, $_size1234); - for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) + $_size1241 = 0; + $_etype1244 = 0; + $xfer += $input->readListBegin($_etype1244, $_size1241); + for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245) { - $elem1239 = null; - $xfer += $input->readString($elem1239); - $this->success []= $elem1239; + $elem1246 = null; + $xfer += $input->readString($elem1246); + $this->success []= $elem1246; } $xfer += $input->readListEnd(); } else { @@ -24434,9 +24550,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1240) + foreach ($this->success as $iter1247) { - $xfer += $output->writeString($iter1240); + $xfer += $output->writeString($iter1247); } } $output->writeListEnd(); @@ -24568,15 +24684,15 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1241 = 0; - $_etype1244 = 0; - $xfer += $input->readListBegin($_etype1244, $_size1241); - for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245) + $_size1248 = 0; + $_etype1251 = 0; + $xfer += $input->readListBegin($_etype1251, $_size1248); + for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) { - $elem1246 = null; - $elem1246 = new \metastore\Table(); - $xfer += $elem1246->read($input); - $this->success []= $elem1246; + $elem1253 = null; + $elem1253 = new \metastore\Table(); + $xfer += $elem1253->read($input); + $this->success []= $elem1253; } $xfer += $input->readListEnd(); } else { @@ -24612,9 +24728,9 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result { 
$output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1247) + foreach ($this->success as $iter1254) { - $xfer += $iter1247->write($output); + $xfer += $iter1254->write($output); } } $output->writeListEnd(); @@ -24770,14 +24886,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1248 = 0; - $_etype1251 = 0; - $xfer += $input->readListBegin($_etype1251, $_size1248); - for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) + $_size1255 = 0; + $_etype1258 = 0; + $xfer += $input->readListBegin($_etype1258, $_size1255); + for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) { - $elem1253 = null; - $xfer += $input->readString($elem1253); - $this->success []= $elem1253; + $elem1260 = null; + $xfer += $input->readString($elem1260); + $this->success []= $elem1260; } $xfer += $input->readListEnd(); } else { @@ -24813,9 +24929,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1254) + foreach ($this->success as $iter1261) { - $xfer += $output->writeString($iter1254); + $xfer += $output->writeString($iter1261); } } $output->writeListEnd(); @@ -24920,14 +25036,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size1255 = 0; - $_etype1258 = 0; - $xfer += $input->readListBegin($_etype1258, $_size1255); - for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) + $_size1262 = 0; + $_etype1265 = 0; + $xfer += $input->readListBegin($_etype1265, $_size1262); + for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266) { - $elem1260 = null; - $xfer += $input->readString($elem1260); - $this->tbl_types []= $elem1260; + $elem1267 = null; + $xfer += $input->readString($elem1267); + $this->tbl_types []= $elem1267; } $xfer += $input->readListEnd(); } else { @@ -24965,9 +25081,9 
@@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter1261) + foreach ($this->tbl_types as $iter1268) { - $xfer += $output->writeString($iter1261); + $xfer += $output->writeString($iter1268); } } $output->writeListEnd(); @@ -25044,15 +25160,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1262 = 0; - $_etype1265 = 0; - $xfer += $input->readListBegin($_etype1265, $_size1262); - for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266) + $_size1269 = 0; + $_etype1272 = 0; + $xfer += $input->readListBegin($_etype1272, $_size1269); + for ($_i1273 = 0; $_i1273 < $_size1269; ++$_i1273) { - $elem1267 = null; - $elem1267 = new \metastore\TableMeta(); - $xfer += $elem1267->read($input); - $this->success []= $elem1267; + $elem1274 = null; + $elem1274 = new \metastore\TableMeta(); + $xfer += $elem1274->read($input); + $this->success []= $elem1274; } $xfer += $input->readListEnd(); } else { @@ -25088,9 +25204,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1268) + foreach ($this->success as $iter1275) { - $xfer += $iter1268->write($output); + $xfer += $iter1275->write($output); } } $output->writeListEnd(); @@ -25246,14 +25362,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1269 = 0; - $_etype1272 = 0; - $xfer += $input->readListBegin($_etype1272, $_size1269); - for ($_i1273 = 0; $_i1273 < $_size1269; ++$_i1273) + $_size1276 = 0; + $_etype1279 = 0; + $xfer += $input->readListBegin($_etype1279, $_size1276); + for ($_i1280 = 0; $_i1280 < $_size1276; ++$_i1280) { - $elem1274 = null; - $xfer += $input->readString($elem1274); - $this->success []= $elem1274; + $elem1281 = null; + $xfer += $input->readString($elem1281); + 
$this->success []= $elem1281; } $xfer += $input->readListEnd(); } else { @@ -25289,9 +25405,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1275) + foreach ($this->success as $iter1282) { - $xfer += $output->writeString($iter1275); + $xfer += $output->writeString($iter1282); } } $output->writeListEnd(); @@ -25606,14 +25722,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size1276 = 0; - $_etype1279 = 0; - $xfer += $input->readListBegin($_etype1279, $_size1276); - for ($_i1280 = 0; $_i1280 < $_size1276; ++$_i1280) + $_size1283 = 0; + $_etype1286 = 0; + $xfer += $input->readListBegin($_etype1286, $_size1283); + for ($_i1287 = 0; $_i1287 < $_size1283; ++$_i1287) { - $elem1281 = null; - $xfer += $input->readString($elem1281); - $this->tbl_names []= $elem1281; + $elem1288 = null; + $xfer += $input->readString($elem1288); + $this->tbl_names []= $elem1288; } $xfer += $input->readListEnd(); } else { @@ -25646,9 +25762,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1282) + foreach ($this->tbl_names as $iter1289) { - $xfer += $output->writeString($iter1282); + $xfer += $output->writeString($iter1289); } } $output->writeListEnd(); @@ -25713,15 +25829,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1283 = 0; - $_etype1286 = 0; - $xfer += $input->readListBegin($_etype1286, $_size1283); - for ($_i1287 = 0; $_i1287 < $_size1283; ++$_i1287) + $_size1290 = 0; + $_etype1293 = 0; + $xfer += $input->readListBegin($_etype1293, $_size1290); + for ($_i1294 = 0; $_i1294 < $_size1290; ++$_i1294) { - $elem1288 = null; - $elem1288 = new \metastore\Table(); - $xfer += $elem1288->read($input); - 
$this->success []= $elem1288; + $elem1295 = null; + $elem1295 = new \metastore\Table(); + $xfer += $elem1295->read($input); + $this->success []= $elem1295; } $xfer += $input->readListEnd(); } else { @@ -25749,9 +25865,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1289) + foreach ($this->success as $iter1296) { - $xfer += $iter1289->write($output); + $xfer += $iter1296->write($output); } } $output->writeListEnd(); @@ -25908,15 +26024,15 @@ class ThriftHiveMetastore_get_tables_ext_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1290 = 0; - $_etype1293 = 0; - $xfer += $input->readListBegin($_etype1293, $_size1290); - for ($_i1294 = 0; $_i1294 < $_size1290; ++$_i1294) + $_size1297 = 0; + $_etype1300 = 0; + $xfer += $input->readListBegin($_etype1300, $_size1297); + for ($_i1301 = 0; $_i1301 < $_size1297; ++$_i1301) { - $elem1295 = null; - $elem1295 = new \metastore\ExtendedTableInfo(); - $xfer += $elem1295->read($input); - $this->success []= $elem1295; + $elem1302 = null; + $elem1302 = new \metastore\ExtendedTableInfo(); + $xfer += $elem1302->read($input); + $this->success []= $elem1302; } $xfer += $input->readListEnd(); } else { @@ -25952,9 +26068,9 @@ class ThriftHiveMetastore_get_tables_ext_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1296) + foreach ($this->success as $iter1303) { - $xfer += $iter1296->write($output); + $xfer += $iter1303->write($output); } } $output->writeListEnd(); @@ -27159,14 +27275,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1297 = 0; - $_etype1300 = 0; - $xfer += $input->readListBegin($_etype1300, $_size1297); - for ($_i1301 = 0; $_i1301 < $_size1297; ++$_i1301) + $_size1304 = 0; + $_etype1307 = 0; + $xfer += 
$input->readListBegin($_etype1307, $_size1304); + for ($_i1308 = 0; $_i1308 < $_size1304; ++$_i1308) { - $elem1302 = null; - $xfer += $input->readString($elem1302); - $this->success []= $elem1302; + $elem1309 = null; + $xfer += $input->readString($elem1309); + $this->success []= $elem1309; } $xfer += $input->readListEnd(); } else { @@ -27218,9 +27334,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1303) + foreach ($this->success as $iter1310) { - $xfer += $output->writeString($iter1303); + $xfer += $output->writeString($iter1310); } } $output->writeListEnd(); @@ -28743,15 +28859,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1304 = 0; - $_etype1307 = 0; - $xfer += $input->readListBegin($_etype1307, $_size1304); - for ($_i1308 = 0; $_i1308 < $_size1304; ++$_i1308) + $_size1311 = 0; + $_etype1314 = 0; + $xfer += $input->readListBegin($_etype1314, $_size1311); + for ($_i1315 = 0; $_i1315 < $_size1311; ++$_i1315) { - $elem1309 = null; - $elem1309 = new \metastore\Partition(); - $xfer += $elem1309->read($input); - $this->new_parts []= $elem1309; + $elem1316 = null; + $elem1316 = new \metastore\Partition(); + $xfer += $elem1316->read($input); + $this->new_parts []= $elem1316; } $xfer += $input->readListEnd(); } else { @@ -28779,9 +28895,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1310) + foreach ($this->new_parts as $iter1317) { - $xfer += $iter1310->write($output); + $xfer += $iter1317->write($output); } } $output->writeListEnd(); @@ -28996,15 +29112,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1311 = 0; - $_etype1314 = 0; - $xfer += $input->readListBegin($_etype1314, 
$_size1311); - for ($_i1315 = 0; $_i1315 < $_size1311; ++$_i1315) + $_size1318 = 0; + $_etype1321 = 0; + $xfer += $input->readListBegin($_etype1321, $_size1318); + for ($_i1322 = 0; $_i1322 < $_size1318; ++$_i1322) { - $elem1316 = null; - $elem1316 = new \metastore\PartitionSpec(); - $xfer += $elem1316->read($input); - $this->new_parts []= $elem1316; + $elem1323 = null; + $elem1323 = new \metastore\PartitionSpec(); + $xfer += $elem1323->read($input); + $this->new_parts []= $elem1323; } $xfer += $input->readListEnd(); } else { @@ -29032,9 +29148,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1317) + foreach ($this->new_parts as $iter1324) { - $xfer += $iter1317->write($output); + $xfer += $iter1324->write($output); } } $output->writeListEnd(); @@ -29284,14 +29400,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1318 = 0; - $_etype1321 = 0; - $xfer += $input->readListBegin($_etype1321, $_size1318); - for ($_i1322 = 0; $_i1322 < $_size1318; ++$_i1322) + $_size1325 = 0; + $_etype1328 = 0; + $xfer += $input->readListBegin($_etype1328, $_size1325); + for ($_i1329 = 0; $_i1329 < $_size1325; ++$_i1329) { - $elem1323 = null; - $xfer += $input->readString($elem1323); - $this->part_vals []= $elem1323; + $elem1330 = null; + $xfer += $input->readString($elem1330); + $this->part_vals []= $elem1330; } $xfer += $input->readListEnd(); } else { @@ -29329,9 +29445,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1324) + foreach ($this->part_vals as $iter1331) { - $xfer += $output->writeString($iter1324); + $xfer += $output->writeString($iter1331); } } $output->writeListEnd(); @@ -29833,14 +29949,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: 
if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1325 = 0; - $_etype1328 = 0; - $xfer += $input->readListBegin($_etype1328, $_size1325); - for ($_i1329 = 0; $_i1329 < $_size1325; ++$_i1329) + $_size1332 = 0; + $_etype1335 = 0; + $xfer += $input->readListBegin($_etype1335, $_size1332); + for ($_i1336 = 0; $_i1336 < $_size1332; ++$_i1336) { - $elem1330 = null; - $xfer += $input->readString($elem1330); - $this->part_vals []= $elem1330; + $elem1337 = null; + $xfer += $input->readString($elem1337); + $this->part_vals []= $elem1337; } $xfer += $input->readListEnd(); } else { @@ -29886,9 +30002,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1331) + foreach ($this->part_vals as $iter1338) { - $xfer += $output->writeString($iter1331); + $xfer += $output->writeString($iter1338); } } $output->writeListEnd(); @@ -30742,14 +30858,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1332 = 0; - $_etype1335 = 0; - $xfer += $input->readListBegin($_etype1335, $_size1332); - for ($_i1336 = 0; $_i1336 < $_size1332; ++$_i1336) + $_size1339 = 0; + $_etype1342 = 0; + $xfer += $input->readListBegin($_etype1342, $_size1339); + for ($_i1343 = 0; $_i1343 < $_size1339; ++$_i1343) { - $elem1337 = null; - $xfer += $input->readString($elem1337); - $this->part_vals []= $elem1337; + $elem1344 = null; + $xfer += $input->readString($elem1344); + $this->part_vals []= $elem1344; } $xfer += $input->readListEnd(); } else { @@ -30794,9 +30910,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1338) + foreach ($this->part_vals as $iter1345) { - $xfer += $output->writeString($iter1338); + $xfer += $output->writeString($iter1345); } } $output->writeListEnd(); @@ -31049,14 
+31165,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1339 = 0; - $_etype1342 = 0; - $xfer += $input->readListBegin($_etype1342, $_size1339); - for ($_i1343 = 0; $_i1343 < $_size1339; ++$_i1343) + $_size1346 = 0; + $_etype1349 = 0; + $xfer += $input->readListBegin($_etype1349, $_size1346); + for ($_i1350 = 0; $_i1350 < $_size1346; ++$_i1350) { - $elem1344 = null; - $xfer += $input->readString($elem1344); - $this->part_vals []= $elem1344; + $elem1351 = null; + $xfer += $input->readString($elem1351); + $this->part_vals []= $elem1351; } $xfer += $input->readListEnd(); } else { @@ -31109,9 +31225,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1345) + foreach ($this->part_vals as $iter1352) { - $xfer += $output->writeString($iter1345); + $xfer += $output->writeString($iter1352); } } $output->writeListEnd(); @@ -32125,14 +32241,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1346 = 0; - $_etype1349 = 0; - $xfer += $input->readListBegin($_etype1349, $_size1346); - for ($_i1350 = 0; $_i1350 < $_size1346; ++$_i1350) + $_size1353 = 0; + $_etype1356 = 0; + $xfer += $input->readListBegin($_etype1356, $_size1353); + for ($_i1357 = 0; $_i1357 < $_size1353; ++$_i1357) { - $elem1351 = null; - $xfer += $input->readString($elem1351); - $this->part_vals []= $elem1351; + $elem1358 = null; + $xfer += $input->readString($elem1358); + $this->part_vals []= $elem1358; } $xfer += $input->readListEnd(); } else { @@ -32170,9 +32286,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1352) + foreach ($this->part_vals as $iter1359) { - $xfer += $output->writeString($iter1352); + 
$xfer += $output->writeString($iter1359); } } $output->writeListEnd(); @@ -32624,17 +32740,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1353 = 0; - $_ktype1354 = 0; - $_vtype1355 = 0; - $xfer += $input->readMapBegin($_ktype1354, $_vtype1355, $_size1353); - for ($_i1357 = 0; $_i1357 < $_size1353; ++$_i1357) + $_size1360 = 0; + $_ktype1361 = 0; + $_vtype1362 = 0; + $xfer += $input->readMapBegin($_ktype1361, $_vtype1362, $_size1360); + for ($_i1364 = 0; $_i1364 < $_size1360; ++$_i1364) { - $key1358 = ''; - $val1359 = ''; - $xfer += $input->readString($key1358); - $xfer += $input->readString($val1359); - $this->partitionSpecs[$key1358] = $val1359; + $key1365 = ''; + $val1366 = ''; + $xfer += $input->readString($key1365); + $xfer += $input->readString($val1366); + $this->partitionSpecs[$key1365] = $val1366; } $xfer += $input->readMapEnd(); } else { @@ -32690,10 +32806,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1360 => $viter1361) + foreach ($this->partitionSpecs as $kiter1367 => $viter1368) { - $xfer += $output->writeString($kiter1360); - $xfer += $output->writeString($viter1361); + $xfer += $output->writeString($kiter1367); + $xfer += $output->writeString($viter1368); } } $output->writeMapEnd(); @@ -33005,17 +33121,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1362 = 0; - $_ktype1363 = 0; - $_vtype1364 = 0; - $xfer += $input->readMapBegin($_ktype1363, $_vtype1364, $_size1362); - for ($_i1366 = 0; $_i1366 < $_size1362; ++$_i1366) + $_size1369 = 0; + $_ktype1370 = 0; + $_vtype1371 = 0; + $xfer += $input->readMapBegin($_ktype1370, $_vtype1371, $_size1369); + for ($_i1373 = 0; $_i1373 < $_size1369; ++$_i1373) { - $key1367 = ''; - $val1368 = ''; - $xfer 
+= $input->readString($key1367); - $xfer += $input->readString($val1368); - $this->partitionSpecs[$key1367] = $val1368; + $key1374 = ''; + $val1375 = ''; + $xfer += $input->readString($key1374); + $xfer += $input->readString($val1375); + $this->partitionSpecs[$key1374] = $val1375; } $xfer += $input->readMapEnd(); } else { @@ -33071,10 +33187,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1369 => $viter1370) + foreach ($this->partitionSpecs as $kiter1376 => $viter1377) { - $xfer += $output->writeString($kiter1369); - $xfer += $output->writeString($viter1370); + $xfer += $output->writeString($kiter1376); + $xfer += $output->writeString($viter1377); } } $output->writeMapEnd(); @@ -33207,15 +33323,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1371 = 0; - $_etype1374 = 0; - $xfer += $input->readListBegin($_etype1374, $_size1371); - for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375) + $_size1378 = 0; + $_etype1381 = 0; + $xfer += $input->readListBegin($_etype1381, $_size1378); + for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382) { - $elem1376 = null; - $elem1376 = new \metastore\Partition(); - $xfer += $elem1376->read($input); - $this->success []= $elem1376; + $elem1383 = null; + $elem1383 = new \metastore\Partition(); + $xfer += $elem1383->read($input); + $this->success []= $elem1383; } $xfer += $input->readListEnd(); } else { @@ -33275,9 +33391,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1377) + foreach ($this->success as $iter1384) { - $xfer += $iter1377->write($output); + $xfer += $iter1384->write($output); } } $output->writeListEnd(); @@ -33423,14 +33539,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if 
($ftype == TType::LST) { $this->part_vals = array(); - $_size1378 = 0; - $_etype1381 = 0; - $xfer += $input->readListBegin($_etype1381, $_size1378); - for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382) + $_size1385 = 0; + $_etype1388 = 0; + $xfer += $input->readListBegin($_etype1388, $_size1385); + for ($_i1389 = 0; $_i1389 < $_size1385; ++$_i1389) { - $elem1383 = null; - $xfer += $input->readString($elem1383); - $this->part_vals []= $elem1383; + $elem1390 = null; + $xfer += $input->readString($elem1390); + $this->part_vals []= $elem1390; } $xfer += $input->readListEnd(); } else { @@ -33447,14 +33563,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1384 = 0; - $_etype1387 = 0; - $xfer += $input->readListBegin($_etype1387, $_size1384); - for ($_i1388 = 0; $_i1388 < $_size1384; ++$_i1388) + $_size1391 = 0; + $_etype1394 = 0; + $xfer += $input->readListBegin($_etype1394, $_size1391); + for ($_i1395 = 0; $_i1395 < $_size1391; ++$_i1395) { - $elem1389 = null; - $xfer += $input->readString($elem1389); - $this->group_names []= $elem1389; + $elem1396 = null; + $xfer += $input->readString($elem1396); + $this->group_names []= $elem1396; } $xfer += $input->readListEnd(); } else { @@ -33492,9 +33608,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1390) + foreach ($this->part_vals as $iter1397) { - $xfer += $output->writeString($iter1390); + $xfer += $output->writeString($iter1397); } } $output->writeListEnd(); @@ -33514,9 +33630,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1391) + foreach ($this->group_names as $iter1398) { - $xfer += $output->writeString($iter1391); + $xfer += $output->writeString($iter1398); } } $output->writeListEnd(); @@ 
-34107,15 +34223,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1392 = 0; - $_etype1395 = 0; - $xfer += $input->readListBegin($_etype1395, $_size1392); - for ($_i1396 = 0; $_i1396 < $_size1392; ++$_i1396) + $_size1399 = 0; + $_etype1402 = 0; + $xfer += $input->readListBegin($_etype1402, $_size1399); + for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403) { - $elem1397 = null; - $elem1397 = new \metastore\Partition(); - $xfer += $elem1397->read($input); - $this->success []= $elem1397; + $elem1404 = null; + $elem1404 = new \metastore\Partition(); + $xfer += $elem1404->read($input); + $this->success []= $elem1404; } $xfer += $input->readListEnd(); } else { @@ -34159,9 +34275,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1398) + foreach ($this->success as $iter1405) { - $xfer += $iter1398->write($output); + $xfer += $iter1405->write($output); } } $output->writeListEnd(); @@ -34517,14 +34633,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1399 = 0; - $_etype1402 = 0; - $xfer += $input->readListBegin($_etype1402, $_size1399); - for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403) + $_size1406 = 0; + $_etype1409 = 0; + $xfer += $input->readListBegin($_etype1409, $_size1406); + for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) { - $elem1404 = null; - $xfer += $input->readString($elem1404); - $this->group_names []= $elem1404; + $elem1411 = null; + $xfer += $input->readString($elem1411); + $this->group_names []= $elem1411; } $xfer += $input->readListEnd(); } else { @@ -34572,9 +34688,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1405) + foreach ($this->group_names as $iter1412) { - 
$xfer += $output->writeString($iter1405); + $xfer += $output->writeString($iter1412); } } $output->writeListEnd(); @@ -34663,15 +34779,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1406 = 0; - $_etype1409 = 0; - $xfer += $input->readListBegin($_etype1409, $_size1406); - for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) + $_size1413 = 0; + $_etype1416 = 0; + $xfer += $input->readListBegin($_etype1416, $_size1413); + for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) { - $elem1411 = null; - $elem1411 = new \metastore\Partition(); - $xfer += $elem1411->read($input); - $this->success []= $elem1411; + $elem1418 = null; + $elem1418 = new \metastore\Partition(); + $xfer += $elem1418->read($input); + $this->success []= $elem1418; } $xfer += $input->readListEnd(); } else { @@ -34715,9 +34831,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1412) + foreach ($this->success as $iter1419) { - $xfer += $iter1412->write($output); + $xfer += $iter1419->write($output); } } $output->writeListEnd(); @@ -34937,15 +35053,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1413 = 0; - $_etype1416 = 0; - $xfer += $input->readListBegin($_etype1416, $_size1413); - for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) + $_size1420 = 0; + $_etype1423 = 0; + $xfer += $input->readListBegin($_etype1423, $_size1420); + for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) { - $elem1418 = null; - $elem1418 = new \metastore\PartitionSpec(); - $xfer += $elem1418->read($input); - $this->success []= $elem1418; + $elem1425 = null; + $elem1425 = new \metastore\PartitionSpec(); + $xfer += $elem1425->read($input); + $this->success []= $elem1425; } $xfer += $input->readListEnd(); } else { @@ -34989,9 +35105,9 @@ class 
ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1419) + foreach ($this->success as $iter1426) { - $xfer += $iter1419->write($output); + $xfer += $iter1426->write($output); } } $output->writeListEnd(); @@ -35210,14 +35326,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1420 = 0; - $_etype1423 = 0; - $xfer += $input->readListBegin($_etype1423, $_size1420); - for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) + $_size1427 = 0; + $_etype1430 = 0; + $xfer += $input->readListBegin($_etype1430, $_size1427); + for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) { - $elem1425 = null; - $xfer += $input->readString($elem1425); - $this->success []= $elem1425; + $elem1432 = null; + $xfer += $input->readString($elem1432); + $this->success []= $elem1432; } $xfer += $input->readListEnd(); } else { @@ -35261,9 +35377,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1426) + foreach ($this->success as $iter1433) { - $xfer += $output->writeString($iter1426); + $xfer += $output->writeString($iter1433); } } $output->writeListEnd(); @@ -35594,14 +35710,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1427 = 0; - $_etype1430 = 0; - $xfer += $input->readListBegin($_etype1430, $_size1427); - for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) + $_size1434 = 0; + $_etype1437 = 0; + $xfer += $input->readListBegin($_etype1437, $_size1434); + for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) { - $elem1432 = null; - $xfer += $input->readString($elem1432); - $this->part_vals []= $elem1432; + $elem1439 = null; + $xfer += $input->readString($elem1439); + $this->part_vals []= $elem1439; } $xfer += $input->readListEnd(); } else { 
@@ -35646,9 +35762,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1433) + foreach ($this->part_vals as $iter1440) { - $xfer += $output->writeString($iter1433); + $xfer += $output->writeString($iter1440); } } $output->writeListEnd(); @@ -35742,15 +35858,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1434 = 0; - $_etype1437 = 0; - $xfer += $input->readListBegin($_etype1437, $_size1434); - for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) + $_size1441 = 0; + $_etype1444 = 0; + $xfer += $input->readListBegin($_etype1444, $_size1441); + for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) { - $elem1439 = null; - $elem1439 = new \metastore\Partition(); - $xfer += $elem1439->read($input); - $this->success []= $elem1439; + $elem1446 = null; + $elem1446 = new \metastore\Partition(); + $xfer += $elem1446->read($input); + $this->success []= $elem1446; } $xfer += $input->readListEnd(); } else { @@ -35794,9 +35910,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1440) + foreach ($this->success as $iter1447) { - $xfer += $iter1440->write($output); + $xfer += $iter1447->write($output); } } $output->writeListEnd(); @@ -35943,14 +36059,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1441 = 0; - $_etype1444 = 0; - $xfer += $input->readListBegin($_etype1444, $_size1441); - for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) + $_size1448 = 0; + $_etype1451 = 0; + $xfer += $input->readListBegin($_etype1451, $_size1448); + for ($_i1452 = 0; $_i1452 < $_size1448; ++$_i1452) { - $elem1446 = null; - $xfer += $input->readString($elem1446); - $this->part_vals []= $elem1446; + $elem1453 = null; + $xfer += 
$input->readString($elem1453); + $this->part_vals []= $elem1453; } $xfer += $input->readListEnd(); } else { @@ -35974,14 +36090,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1447 = 0; - $_etype1450 = 0; - $xfer += $input->readListBegin($_etype1450, $_size1447); - for ($_i1451 = 0; $_i1451 < $_size1447; ++$_i1451) + $_size1454 = 0; + $_etype1457 = 0; + $xfer += $input->readListBegin($_etype1457, $_size1454); + for ($_i1458 = 0; $_i1458 < $_size1454; ++$_i1458) { - $elem1452 = null; - $xfer += $input->readString($elem1452); - $this->group_names []= $elem1452; + $elem1459 = null; + $xfer += $input->readString($elem1459); + $this->group_names []= $elem1459; } $xfer += $input->readListEnd(); } else { @@ -36019,9 +36135,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1453) + foreach ($this->part_vals as $iter1460) { - $xfer += $output->writeString($iter1453); + $xfer += $output->writeString($iter1460); } } $output->writeListEnd(); @@ -36046,9 +36162,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1454) + foreach ($this->group_names as $iter1461) { - $xfer += $output->writeString($iter1454); + $xfer += $output->writeString($iter1461); } } $output->writeListEnd(); @@ -36137,15 +36253,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1455 = 0; - $_etype1458 = 0; - $xfer += $input->readListBegin($_etype1458, $_size1455); - for ($_i1459 = 0; $_i1459 < $_size1455; ++$_i1459) + $_size1462 = 0; + $_etype1465 = 0; + $xfer += $input->readListBegin($_etype1465, $_size1462); + for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) { - $elem1460 = null; - 
$elem1460 = new \metastore\Partition(); - $xfer += $elem1460->read($input); - $this->success []= $elem1460; + $elem1467 = null; + $elem1467 = new \metastore\Partition(); + $xfer += $elem1467->read($input); + $this->success []= $elem1467; } $xfer += $input->readListEnd(); } else { @@ -36189,9 +36305,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1461) + foreach ($this->success as $iter1468) { - $xfer += $iter1461->write($output); + $xfer += $iter1468->write($output); } } $output->writeListEnd(); @@ -36522,14 +36638,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1462 = 0; - $_etype1465 = 0; - $xfer += $input->readListBegin($_etype1465, $_size1462); - for ($_i1466 = 0; $_i1466 < $_size1462; ++$_i1466) + $_size1469 = 0; + $_etype1472 = 0; + $xfer += $input->readListBegin($_etype1472, $_size1469); + for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) { - $elem1467 = null; - $xfer += $input->readString($elem1467); - $this->part_vals []= $elem1467; + $elem1474 = null; + $xfer += $input->readString($elem1474); + $this->part_vals []= $elem1474; } $xfer += $input->readListEnd(); } else { @@ -36574,9 +36690,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1468) + foreach ($this->part_vals as $iter1475) { - $xfer += $output->writeString($iter1468); + $xfer += $output->writeString($iter1475); } } $output->writeListEnd(); @@ -36669,14 +36785,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1469 = 0; - $_etype1472 = 0; - $xfer += $input->readListBegin($_etype1472, $_size1469); - for ($_i1473 = 0; $_i1473 < $_size1469; ++$_i1473) + $_size1476 = 0; + $_etype1479 = 0; + $xfer += 
$input->readListBegin($_etype1479, $_size1476); + for ($_i1480 = 0; $_i1480 < $_size1476; ++$_i1480) { - $elem1474 = null; - $xfer += $input->readString($elem1474); - $this->success []= $elem1474; + $elem1481 = null; + $xfer += $input->readString($elem1481); + $this->success []= $elem1481; } $xfer += $input->readListEnd(); } else { @@ -36720,9 +36836,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1475) + foreach ($this->success as $iter1482) { - $xfer += $output->writeString($iter1475); + $xfer += $output->writeString($iter1482); } } $output->writeListEnd(); @@ -37110,14 +37226,14 @@ class ThriftHiveMetastore_get_partition_names_req_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1476 = 0; - $_etype1479 = 0; - $xfer += $input->readListBegin($_etype1479, $_size1476); - for ($_i1480 = 0; $_i1480 < $_size1476; ++$_i1480) + $_size1483 = 0; + $_etype1486 = 0; + $xfer += $input->readListBegin($_etype1486, $_size1483); + for ($_i1487 = 0; $_i1487 < $_size1483; ++$_i1487) { - $elem1481 = null; - $xfer += $input->readString($elem1481); - $this->success []= $elem1481; + $elem1488 = null; + $xfer += $input->readString($elem1488); + $this->success []= $elem1488; } $xfer += $input->readListEnd(); } else { @@ -37161,9 +37277,9 @@ class ThriftHiveMetastore_get_partition_names_req_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1482) + foreach ($this->success as $iter1489) { - $xfer += $output->writeString($iter1482); + $xfer += $output->writeString($iter1489); } } $output->writeListEnd(); @@ -37406,15 +37522,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1483 = 0; - $_etype1486 = 0; - $xfer += $input->readListBegin($_etype1486, $_size1483); - for ($_i1487 = 0; $_i1487 < $_size1483; 
++$_i1487) + $_size1490 = 0; + $_etype1493 = 0; + $xfer += $input->readListBegin($_etype1493, $_size1490); + for ($_i1494 = 0; $_i1494 < $_size1490; ++$_i1494) { - $elem1488 = null; - $elem1488 = new \metastore\Partition(); - $xfer += $elem1488->read($input); - $this->success []= $elem1488; + $elem1495 = null; + $elem1495 = new \metastore\Partition(); + $xfer += $elem1495->read($input); + $this->success []= $elem1495; } $xfer += $input->readListEnd(); } else { @@ -37458,9 +37574,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1489) + foreach ($this->success as $iter1496) { - $xfer += $iter1489->write($output); + $xfer += $iter1496->write($output); } } $output->writeListEnd(); @@ -37703,15 +37819,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1490 = 0; - $_etype1493 = 0; - $xfer += $input->readListBegin($_etype1493, $_size1490); - for ($_i1494 = 0; $_i1494 < $_size1490; ++$_i1494) + $_size1497 = 0; + $_etype1500 = 0; + $xfer += $input->readListBegin($_etype1500, $_size1497); + for ($_i1501 = 0; $_i1501 < $_size1497; ++$_i1501) { - $elem1495 = null; - $elem1495 = new \metastore\PartitionSpec(); - $xfer += $elem1495->read($input); - $this->success []= $elem1495; + $elem1502 = null; + $elem1502 = new \metastore\PartitionSpec(); + $xfer += $elem1502->read($input); + $this->success []= $elem1502; } $xfer += $input->readListEnd(); } else { @@ -37755,9 +37871,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1496) + foreach ($this->success as $iter1503) { - $xfer += $iter1496->write($output); + $xfer += $iter1503->write($output); } } $output->writeListEnd(); @@ -38533,14 +38649,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if 
($ftype == TType::LST) { $this->names = array(); - $_size1497 = 0; - $_etype1500 = 0; - $xfer += $input->readListBegin($_etype1500, $_size1497); - for ($_i1501 = 0; $_i1501 < $_size1497; ++$_i1501) + $_size1504 = 0; + $_etype1507 = 0; + $xfer += $input->readListBegin($_etype1507, $_size1504); + for ($_i1508 = 0; $_i1508 < $_size1504; ++$_i1508) { - $elem1502 = null; - $xfer += $input->readString($elem1502); - $this->names []= $elem1502; + $elem1509 = null; + $xfer += $input->readString($elem1509); + $this->names []= $elem1509; } $xfer += $input->readListEnd(); } else { @@ -38578,9 +38694,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1503) + foreach ($this->names as $iter1510) { - $xfer += $output->writeString($iter1503); + $xfer += $output->writeString($iter1510); } } $output->writeListEnd(); @@ -38669,15 +38785,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1504 = 0; - $_etype1507 = 0; - $xfer += $input->readListBegin($_etype1507, $_size1504); - for ($_i1508 = 0; $_i1508 < $_size1504; ++$_i1508) + $_size1511 = 0; + $_etype1514 = 0; + $xfer += $input->readListBegin($_etype1514, $_size1511); + for ($_i1515 = 0; $_i1515 < $_size1511; ++$_i1515) { - $elem1509 = null; - $elem1509 = new \metastore\Partition(); - $xfer += $elem1509->read($input); - $this->success []= $elem1509; + $elem1516 = null; + $elem1516 = new \metastore\Partition(); + $xfer += $elem1516->read($input); + $this->success []= $elem1516; } $xfer += $input->readListEnd(); } else { @@ -38721,9 +38837,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1510) + foreach ($this->success as $iter1517) { - $xfer += $iter1510->write($output); + $xfer += $iter1517->write($output); } } 
$output->writeListEnd(); @@ -39272,15 +39388,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1511 = 0; - $_etype1514 = 0; - $xfer += $input->readListBegin($_etype1514, $_size1511); - for ($_i1515 = 0; $_i1515 < $_size1511; ++$_i1515) + $_size1518 = 0; + $_etype1521 = 0; + $xfer += $input->readListBegin($_etype1521, $_size1518); + for ($_i1522 = 0; $_i1522 < $_size1518; ++$_i1522) { - $elem1516 = null; - $elem1516 = new \metastore\Partition(); - $xfer += $elem1516->read($input); - $this->new_parts []= $elem1516; + $elem1523 = null; + $elem1523 = new \metastore\Partition(); + $xfer += $elem1523->read($input); + $this->new_parts []= $elem1523; } $xfer += $input->readListEnd(); } else { @@ -39318,9 +39434,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1517) + foreach ($this->new_parts as $iter1524) { - $xfer += $iter1517->write($output); + $xfer += $iter1524->write($output); } } $output->writeListEnd(); @@ -39535,15 +39651,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1518 = 0; - $_etype1521 = 0; - $xfer += $input->readListBegin($_etype1521, $_size1518); - for ($_i1522 = 0; $_i1522 < $_size1518; ++$_i1522) + $_size1525 = 0; + $_etype1528 = 0; + $xfer += $input->readListBegin($_etype1528, $_size1525); + for ($_i1529 = 0; $_i1529 < $_size1525; ++$_i1529) { - $elem1523 = null; - $elem1523 = new \metastore\Partition(); - $xfer += $elem1523->read($input); - $this->new_parts []= $elem1523; + $elem1530 = null; + $elem1530 = new \metastore\Partition(); + $xfer += $elem1530->read($input); + $this->new_parts []= $elem1530; } $xfer += $input->readListEnd(); } else { @@ -39589,9 +39705,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { 
$output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1524) + foreach ($this->new_parts as $iter1531) { - $xfer += $iter1524->write($output); + $xfer += $iter1531->write($output); } } $output->writeListEnd(); @@ -40279,14 +40395,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1525 = 0; - $_etype1528 = 0; - $xfer += $input->readListBegin($_etype1528, $_size1525); - for ($_i1529 = 0; $_i1529 < $_size1525; ++$_i1529) + $_size1532 = 0; + $_etype1535 = 0; + $xfer += $input->readListBegin($_etype1535, $_size1532); + for ($_i1536 = 0; $_i1536 < $_size1532; ++$_i1536) { - $elem1530 = null; - $xfer += $input->readString($elem1530); - $this->part_vals []= $elem1530; + $elem1537 = null; + $xfer += $input->readString($elem1537); + $this->part_vals []= $elem1537; } $xfer += $input->readListEnd(); } else { @@ -40332,9 +40448,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1531) + foreach ($this->part_vals as $iter1538) { - $xfer += $output->writeString($iter1531); + $xfer += $output->writeString($iter1538); } } $output->writeListEnd(); @@ -40729,14 +40845,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1532 = 0; - $_etype1535 = 0; - $xfer += $input->readListBegin($_etype1535, $_size1532); - for ($_i1536 = 0; $_i1536 < $_size1532; ++$_i1536) + $_size1539 = 0; + $_etype1542 = 0; + $xfer += $input->readListBegin($_etype1542, $_size1539); + for ($_i1543 = 0; $_i1543 < $_size1539; ++$_i1543) { - $elem1537 = null; - $xfer += $input->readString($elem1537); - $this->part_vals []= $elem1537; + $elem1544 = null; + $xfer += $input->readString($elem1544); + $this->part_vals []= $elem1544; } $xfer += $input->readListEnd(); } else { @@ -40771,9 +40887,9 @@ 
class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1538) + foreach ($this->part_vals as $iter1545) { - $xfer += $output->writeString($iter1538); + $xfer += $output->writeString($iter1545); } } $output->writeListEnd(); @@ -41227,14 +41343,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1539 = 0; - $_etype1542 = 0; - $xfer += $input->readListBegin($_etype1542, $_size1539); - for ($_i1543 = 0; $_i1543 < $_size1539; ++$_i1543) + $_size1546 = 0; + $_etype1549 = 0; + $xfer += $input->readListBegin($_etype1549, $_size1546); + for ($_i1550 = 0; $_i1550 < $_size1546; ++$_i1550) { - $elem1544 = null; - $xfer += $input->readString($elem1544); - $this->success []= $elem1544; + $elem1551 = null; + $xfer += $input->readString($elem1551); + $this->success []= $elem1551; } $xfer += $input->readListEnd(); } else { @@ -41270,9 +41386,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1545) + foreach ($this->success as $iter1552) { - $xfer += $output->writeString($iter1545); + $xfer += $output->writeString($iter1552); } } $output->writeListEnd(); @@ -41432,17 +41548,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1546 = 0; - $_ktype1547 = 0; - $_vtype1548 = 0; - $xfer += $input->readMapBegin($_ktype1547, $_vtype1548, $_size1546); - for ($_i1550 = 0; $_i1550 < $_size1546; ++$_i1550) + $_size1553 = 0; + $_ktype1554 = 0; + $_vtype1555 = 0; + $xfer += $input->readMapBegin($_ktype1554, $_vtype1555, $_size1553); + for ($_i1557 = 0; $_i1557 < $_size1553; ++$_i1557) { - $key1551 = ''; - $val1552 = ''; - $xfer += $input->readString($key1551); - $xfer += $input->readString($val1552); - 
$this->success[$key1551] = $val1552; + $key1558 = ''; + $val1559 = ''; + $xfer += $input->readString($key1558); + $xfer += $input->readString($val1559); + $this->success[$key1558] = $val1559; } $xfer += $input->readMapEnd(); } else { @@ -41478,10 +41594,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1553 => $viter1554) + foreach ($this->success as $kiter1560 => $viter1561) { - $xfer += $output->writeString($kiter1553); - $xfer += $output->writeString($viter1554); + $xfer += $output->writeString($kiter1560); + $xfer += $output->writeString($viter1561); } } $output->writeMapEnd(); @@ -41601,17 +41717,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1555 = 0; - $_ktype1556 = 0; - $_vtype1557 = 0; - $xfer += $input->readMapBegin($_ktype1556, $_vtype1557, $_size1555); - for ($_i1559 = 0; $_i1559 < $_size1555; ++$_i1559) + $_size1562 = 0; + $_ktype1563 = 0; + $_vtype1564 = 0; + $xfer += $input->readMapBegin($_ktype1563, $_vtype1564, $_size1562); + for ($_i1566 = 0; $_i1566 < $_size1562; ++$_i1566) { - $key1560 = ''; - $val1561 = ''; - $xfer += $input->readString($key1560); - $xfer += $input->readString($val1561); - $this->part_vals[$key1560] = $val1561; + $key1567 = ''; + $val1568 = ''; + $xfer += $input->readString($key1567); + $xfer += $input->readString($val1568); + $this->part_vals[$key1567] = $val1568; } $xfer += $input->readMapEnd(); } else { @@ -41656,10 +41772,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1562 => $viter1563) + foreach ($this->part_vals as $kiter1569 => $viter1570) { - $xfer += $output->writeString($kiter1562); - $xfer += $output->writeString($viter1563); + $xfer += $output->writeString($kiter1569); + 
$xfer += $output->writeString($viter1570); } } $output->writeMapEnd(); @@ -41981,17 +42097,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1564 = 0; - $_ktype1565 = 0; - $_vtype1566 = 0; - $xfer += $input->readMapBegin($_ktype1565, $_vtype1566, $_size1564); - for ($_i1568 = 0; $_i1568 < $_size1564; ++$_i1568) + $_size1571 = 0; + $_ktype1572 = 0; + $_vtype1573 = 0; + $xfer += $input->readMapBegin($_ktype1572, $_vtype1573, $_size1571); + for ($_i1575 = 0; $_i1575 < $_size1571; ++$_i1575) { - $key1569 = ''; - $val1570 = ''; - $xfer += $input->readString($key1569); - $xfer += $input->readString($val1570); - $this->part_vals[$key1569] = $val1570; + $key1576 = ''; + $val1577 = ''; + $xfer += $input->readString($key1576); + $xfer += $input->readString($val1577); + $this->part_vals[$key1576] = $val1577; } $xfer += $input->readMapEnd(); } else { @@ -42036,10 +42152,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1571 => $viter1572) + foreach ($this->part_vals as $kiter1578 => $viter1579) { - $xfer += $output->writeString($kiter1571); - $xfer += $output->writeString($viter1572); + $xfer += $output->writeString($kiter1578); + $xfer += $output->writeString($viter1579); } } $output->writeMapEnd(); @@ -47564,14 +47680,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1573 = 0; - $_etype1576 = 0; - $xfer += $input->readListBegin($_etype1576, $_size1573); - for ($_i1577 = 0; $_i1577 < $_size1573; ++$_i1577) + $_size1580 = 0; + $_etype1583 = 0; + $xfer += $input->readListBegin($_etype1583, $_size1580); + for ($_i1584 = 0; $_i1584 < $_size1580; ++$_i1584) { - $elem1578 = null; - $xfer += $input->readString($elem1578); - $this->success []= $elem1578; + $elem1585 = null; + $xfer += 
$input->readString($elem1585); + $this->success []= $elem1585; } $xfer += $input->readListEnd(); } else { @@ -47607,9 +47723,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1579) + foreach ($this->success as $iter1586) { - $xfer += $output->writeString($iter1579); + $xfer += $output->writeString($iter1586); } } $output->writeListEnd(); @@ -48478,14 +48594,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1580 = 0; - $_etype1583 = 0; - $xfer += $input->readListBegin($_etype1583, $_size1580); - for ($_i1584 = 0; $_i1584 < $_size1580; ++$_i1584) + $_size1587 = 0; + $_etype1590 = 0; + $xfer += $input->readListBegin($_etype1590, $_size1587); + for ($_i1591 = 0; $_i1591 < $_size1587; ++$_i1591) { - $elem1585 = null; - $xfer += $input->readString($elem1585); - $this->success []= $elem1585; + $elem1592 = null; + $xfer += $input->readString($elem1592); + $this->success []= $elem1592; } $xfer += $input->readListEnd(); } else { @@ -48521,9 +48637,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1586) + foreach ($this->success as $iter1593) { - $xfer += $output->writeString($iter1586); + $xfer += $output->writeString($iter1593); } } $output->writeListEnd(); @@ -49214,15 +49330,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1587 = 0; - $_etype1590 = 0; - $xfer += $input->readListBegin($_etype1590, $_size1587); - for ($_i1591 = 0; $_i1591 < $_size1587; ++$_i1591) + $_size1594 = 0; + $_etype1597 = 0; + $xfer += $input->readListBegin($_etype1597, $_size1594); + for ($_i1598 = 0; $_i1598 < $_size1594; ++$_i1598) { - $elem1592 = null; - $elem1592 = new \metastore\Role(); - $xfer += $elem1592->read($input); - $this->success 
[]= $elem1592; + $elem1599 = null; + $elem1599 = new \metastore\Role(); + $xfer += $elem1599->read($input); + $this->success []= $elem1599; } $xfer += $input->readListEnd(); } else { @@ -49258,9 +49374,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1593) + foreach ($this->success as $iter1600) { - $xfer += $iter1593->write($output); + $xfer += $iter1600->write($output); } } $output->writeListEnd(); @@ -49922,14 +50038,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1594 = 0; - $_etype1597 = 0; - $xfer += $input->readListBegin($_etype1597, $_size1594); - for ($_i1598 = 0; $_i1598 < $_size1594; ++$_i1598) + $_size1601 = 0; + $_etype1604 = 0; + $xfer += $input->readListBegin($_etype1604, $_size1601); + for ($_i1605 = 0; $_i1605 < $_size1601; ++$_i1605) { - $elem1599 = null; - $xfer += $input->readString($elem1599); - $this->group_names []= $elem1599; + $elem1606 = null; + $xfer += $input->readString($elem1606); + $this->group_names []= $elem1606; } $xfer += $input->readListEnd(); } else { @@ -49970,9 +50086,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1600) + foreach ($this->group_names as $iter1607) { - $xfer += $output->writeString($iter1600); + $xfer += $output->writeString($iter1607); } } $output->writeListEnd(); @@ -50280,15 +50396,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1601 = 0; - $_etype1604 = 0; - $xfer += $input->readListBegin($_etype1604, $_size1601); - for ($_i1605 = 0; $_i1605 < $_size1601; ++$_i1605) + $_size1608 = 0; + $_etype1611 = 0; + $xfer += $input->readListBegin($_etype1611, $_size1608); + for ($_i1612 = 0; $_i1612 < $_size1608; ++$_i1612) { - $elem1606 = 
null; - $elem1606 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1606->read($input); - $this->success []= $elem1606; + $elem1613 = null; + $elem1613 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1613->read($input); + $this->success []= $elem1613; } $xfer += $input->readListEnd(); } else { @@ -50324,9 +50440,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1607) + foreach ($this->success as $iter1614) { - $xfer += $iter1607->write($output); + $xfer += $iter1614->write($output); } } $output->writeListEnd(); @@ -51194,14 +51310,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1608 = 0; - $_etype1611 = 0; - $xfer += $input->readListBegin($_etype1611, $_size1608); - for ($_i1612 = 0; $_i1612 < $_size1608; ++$_i1612) + $_size1615 = 0; + $_etype1618 = 0; + $xfer += $input->readListBegin($_etype1618, $_size1615); + for ($_i1619 = 0; $_i1619 < $_size1615; ++$_i1619) { - $elem1613 = null; - $xfer += $input->readString($elem1613); - $this->group_names []= $elem1613; + $elem1620 = null; + $xfer += $input->readString($elem1620); + $this->group_names []= $elem1620; } $xfer += $input->readListEnd(); } else { @@ -51234,9 +51350,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1614) + foreach ($this->group_names as $iter1621) { - $xfer += $output->writeString($iter1614); + $xfer += $output->writeString($iter1621); } } $output->writeListEnd(); @@ -51312,14 +51428,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1615 = 0; - $_etype1618 = 0; - $xfer += $input->readListBegin($_etype1618, $_size1615); - for ($_i1619 = 0; $_i1619 < $_size1615; ++$_i1619) + $_size1622 = 0; + $_etype1625 = 0; + $xfer += 
$input->readListBegin($_etype1625, $_size1622); + for ($_i1626 = 0; $_i1626 < $_size1622; ++$_i1626) { - $elem1620 = null; - $xfer += $input->readString($elem1620); - $this->success []= $elem1620; + $elem1627 = null; + $xfer += $input->readString($elem1627); + $this->success []= $elem1627; } $xfer += $input->readListEnd(); } else { @@ -51355,9 +51471,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1621) + foreach ($this->success as $iter1628) { - $xfer += $output->writeString($iter1621); + $xfer += $output->writeString($iter1628); } } $output->writeListEnd(); @@ -52474,14 +52590,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1622 = 0; - $_etype1625 = 0; - $xfer += $input->readListBegin($_etype1625, $_size1622); - for ($_i1626 = 0; $_i1626 < $_size1622; ++$_i1626) + $_size1629 = 0; + $_etype1632 = 0; + $xfer += $input->readListBegin($_etype1632, $_size1629); + for ($_i1633 = 0; $_i1633 < $_size1629; ++$_i1633) { - $elem1627 = null; - $xfer += $input->readString($elem1627); - $this->success []= $elem1627; + $elem1634 = null; + $xfer += $input->readString($elem1634); + $this->success []= $elem1634; } $xfer += $input->readListEnd(); } else { @@ -52509,9 +52625,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1628) + foreach ($this->success as $iter1635) { - $xfer += $output->writeString($iter1628); + $xfer += $output->writeString($iter1635); } } $output->writeListEnd(); @@ -53150,14 +53266,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1629 = 0; - $_etype1632 = 0; - $xfer += $input->readListBegin($_etype1632, $_size1629); - for ($_i1633 = 0; $_i1633 < $_size1629; ++$_i1633) + $_size1636 
= 0; + $_etype1639 = 0; + $xfer += $input->readListBegin($_etype1639, $_size1636); + for ($_i1640 = 0; $_i1640 < $_size1636; ++$_i1640) { - $elem1634 = null; - $xfer += $input->readString($elem1634); - $this->success []= $elem1634; + $elem1641 = null; + $xfer += $input->readString($elem1641); + $this->success []= $elem1641; } $xfer += $input->readListEnd(); } else { @@ -53185,9 +53301,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1635) + foreach ($this->success as $iter1642) { - $xfer += $output->writeString($iter1635); + $xfer += $output->writeString($iter1642); } } $output->writeListEnd(); @@ -56941,14 +57057,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1636 = 0; - $_etype1639 = 0; - $xfer += $input->readListBegin($_etype1639, $_size1636); - for ($_i1640 = 0; $_i1640 < $_size1636; ++$_i1640) + $_size1643 = 0; + $_etype1646 = 0; + $xfer += $input->readListBegin($_etype1646, $_size1643); + for ($_i1647 = 0; $_i1647 < $_size1643; ++$_i1647) { - $elem1641 = null; - $xfer += $input->readString($elem1641); - $this->success []= $elem1641; + $elem1648 = null; + $xfer += $input->readString($elem1648); + $this->success []= $elem1648; } $xfer += $input->readListEnd(); } else { @@ -56976,9 +57092,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1642) + foreach ($this->success as $iter1649) { - $xfer += $output->writeString($iter1642); + $xfer += $output->writeString($iter1649); } } $output->writeListEnd(); @@ -65149,15 +65265,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1643 = 0; - $_etype1646 = 0; - $xfer += $input->readListBegin($_etype1646, $_size1643); - for ($_i1647 = 0; 
$_i1647 < $_size1643; ++$_i1647) + $_size1650 = 0; + $_etype1653 = 0; + $xfer += $input->readListBegin($_etype1653, $_size1650); + for ($_i1654 = 0; $_i1654 < $_size1650; ++$_i1654) { - $elem1648 = null; - $elem1648 = new \metastore\SchemaVersion(); - $xfer += $elem1648->read($input); - $this->success []= $elem1648; + $elem1655 = null; + $elem1655 = new \metastore\SchemaVersion(); + $xfer += $elem1655->read($input); + $this->success []= $elem1655; } $xfer += $input->readListEnd(); } else { @@ -65201,9 +65317,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1649) + foreach ($this->success as $iter1656) { - $xfer += $iter1649->write($output); + $xfer += $iter1656->write($output); } } $output->writeListEnd(); @@ -67072,15 +67188,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1650 = 0; - $_etype1653 = 0; - $xfer += $input->readListBegin($_etype1653, $_size1650); - for ($_i1654 = 0; $_i1654 < $_size1650; ++$_i1654) + $_size1657 = 0; + $_etype1660 = 0; + $xfer += $input->readListBegin($_etype1660, $_size1657); + for ($_i1661 = 0; $_i1661 < $_size1657; ++$_i1661) { - $elem1655 = null; - $elem1655 = new \metastore\RuntimeStat(); - $xfer += $elem1655->read($input); - $this->success []= $elem1655; + $elem1662 = null; + $elem1662 = new \metastore\RuntimeStat(); + $xfer += $elem1662->read($input); + $this->success []= $elem1662; } $xfer += $input->readListEnd(); } else { @@ -67116,9 +67232,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1656) + foreach ($this->success as $iter1663) { - $xfer += $iter1656->write($output); + $xfer += $iter1663->write($output); } } $output->writeListEnd(); @@ -68131,4 +68247,346 @@ class ThriftHiveMetastore_get_scheduled_query_result { } 
+class ThriftHiveMetastore_add_replication_metrics_args { + static $_TSPEC; + + /** + * @var \metastore\ReplicationMetricList + */ + public $replicationMetricList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'replicationMetricList', + 'type' => TType::STRUCT, + 'class' => '\metastore\ReplicationMetricList', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['replicationMetricList'])) { + $this->replicationMetricList = $vals['replicationMetricList']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_replication_metrics_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->replicationMetricList = new \metastore\ReplicationMetricList(); + $xfer += $this->replicationMetricList->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_replication_metrics_args'); + if ($this->replicationMetricList !== null) { + if (!is_object($this->replicationMetricList)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('replicationMetricList', TType::STRUCT, 1); + $xfer += $this->replicationMetricList->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_add_replication_metrics_result { + static 
$_TSPEC; + + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_replication_metrics_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_replication_metrics_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_replication_metrics_args { + static $_TSPEC; + + /** + * @var \metastore\GetReplicationMetricsRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetReplicationMetricsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + 
+ public function getName() { + return 'ThriftHiveMetastore_get_replication_metrics_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\GetReplicationMetricsRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_replication_metrics_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_replication_metrics_result { + static $_TSPEC; + + /** + * @var \metastore\ReplicationMetricList + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\ReplicationMetricList', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + 
public function getName() { + return 'ThriftHiveMetastore_get_replication_metrics_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\ReplicationMetricList(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_replication_metrics_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index a09347a615..808c6be94a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -40120,6 +40120,397 @@ class GetPartitionsPsWithAuthResponse { } +class ReplicationMetrics { + static $_TSPEC; + + /** + * @var int + */ + public $scheduledExecutionId = null; + /** + * @var string + */ + public $policy = null; + /** + * @var int + */ + public $dumpExecutionId = null; + /** + * @var string + */ + public $metadata = null; + /** + * @var string + */ + public $progress = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'scheduledExecutionId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'policy', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'dumpExecutionId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'metadata', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'progress', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['scheduledExecutionId'])) { + $this->scheduledExecutionId = $vals['scheduledExecutionId']; + } + if (isset($vals['policy'])) { + $this->policy = $vals['policy']; + } + if (isset($vals['dumpExecutionId'])) { + $this->dumpExecutionId = $vals['dumpExecutionId']; + } + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + if (isset($vals['progress'])) { + $this->progress = $vals['progress']; + } + } + } + + public function getName() { + return 'ReplicationMetrics'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->scheduledExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->policy); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->dumpExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->metadata); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->progress); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ReplicationMetrics'); + if ($this->scheduledExecutionId !== null) { + $xfer += $output->writeFieldBegin('scheduledExecutionId', TType::I64, 1); + $xfer += $output->writeI64($this->scheduledExecutionId); + $xfer += $output->writeFieldEnd(); + } + if ($this->policy !== null) { + $xfer += $output->writeFieldBegin('policy', TType::STRING, 2); + $xfer += $output->writeString($this->policy); + $xfer += $output->writeFieldEnd(); + } + if ($this->dumpExecutionId !== null) { + $xfer += $output->writeFieldBegin('dumpExecutionId', TType::I64, 3); + $xfer += $output->writeI64($this->dumpExecutionId); + $xfer += $output->writeFieldEnd(); + } + if ($this->metadata !== null) { + $xfer += $output->writeFieldBegin('metadata', TType::STRING, 4); + $xfer += $output->writeString($this->metadata); + $xfer += $output->writeFieldEnd(); + } + if ($this->progress !== null) { + $xfer += $output->writeFieldBegin('progress', TType::STRING, 5); + $xfer += $output->writeString($this->progress); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ReplicationMetricList { + static $_TSPEC; + + /** + * @var \metastore\ReplicationMetrics[] 
+ */ + public $replicationMetricList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'replicationMetricList', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\ReplicationMetrics', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['replicationMetricList'])) { + $this->replicationMetricList = $vals['replicationMetricList']; + } + } + } + + public function getName() { + return 'ReplicationMetricList'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->replicationMetricList = array(); + $_size1127 = 0; + $_etype1130 = 0; + $xfer += $input->readListBegin($_etype1130, $_size1127); + for ($_i1131 = 0; $_i1131 < $_size1127; ++$_i1131) + { + $elem1132 = null; + $elem1132 = new \metastore\ReplicationMetrics(); + $xfer += $elem1132->read($input); + $this->replicationMetricList []= $elem1132; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ReplicationMetricList'); + if ($this->replicationMetricList !== null) { + if (!is_array($this->replicationMetricList)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('replicationMetricList', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->replicationMetricList)); + { + foreach ($this->replicationMetricList 
as $iter1133) + { + $xfer += $iter1133->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetReplicationMetricsRequest { + static $_TSPEC; + + /** + * @var int + */ + public $scheduledExecutionId = null; + /** + * @var string + */ + public $policy = null; + /** + * @var int + */ + public $dumpExecutionId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'scheduledExecutionId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'policy', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'dumpExecutionId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['scheduledExecutionId'])) { + $this->scheduledExecutionId = $vals['scheduledExecutionId']; + } + if (isset($vals['policy'])) { + $this->policy = $vals['policy']; + } + if (isset($vals['dumpExecutionId'])) { + $this->dumpExecutionId = $vals['dumpExecutionId']; + } + } + } + + public function getName() { + return 'GetReplicationMetricsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->scheduledExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->policy); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->dumpExecutionId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += 
$input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetReplicationMetricsRequest'); + if ($this->scheduledExecutionId !== null) { + $xfer += $output->writeFieldBegin('scheduledExecutionId', TType::I64, 1); + $xfer += $output->writeI64($this->scheduledExecutionId); + $xfer += $output->writeFieldEnd(); + } + if ($this->policy !== null) { + $xfer += $output->writeFieldBegin('policy', TType::STRING, 2); + $xfer += $output->writeString($this->policy); + $xfer += $output->writeFieldEnd(); + } + if ($this->dumpExecutionId !== null) { + $xfer += $output->writeFieldBegin('dumpExecutionId', TType::I64, 3); + $xfer += $output->writeI64($this->dumpExecutionId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class MetaException extends TException { static $_TSPEC; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index df031e73c5..6c6d7864a3 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -263,6 +263,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void scheduled_query_maintenance(ScheduledQueryMaintenanceRequest request)') print(' void scheduled_query_progress(ScheduledQueryProgressInfo info)') print(' ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey)') + print(' void add_replication_metrics(ReplicationMetricList replicationMetricList)') + print(' ReplicationMetricList get_replication_metrics(GetReplicationMetricsRequest rqst)') print(' string getName()') print(' string getVersion()') print(' 
fb_status getStatus()') @@ -1766,6 +1768,18 @@ elif cmd == 'get_scheduled_query': sys.exit(1) pp.pprint(client.get_scheduled_query(eval(args[0]),)) +elif cmd == 'add_replication_metrics': + if len(args) != 1: + print('add_replication_metrics requires 1 args') + sys.exit(1) + pp.pprint(client.add_replication_metrics(eval(args[0]),)) + +elif cmd == 'get_replication_metrics': + if len(args) != 1: + print('get_replication_metrics requires 1 args') + sys.exit(1) + pp.pprint(client.get_replication_metrics(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 1e57a85e0d..7769058ec8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1827,6 +1827,20 @@ def get_scheduled_query(self, scheduleKey): """ pass + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + """ + pass + + def get_replication_metrics(self, rqst): + """ + Parameters: + - rqst + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -10318,6 +10332,70 @@ def recv_get_scheduled_query(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_scheduled_query failed: unknown result") + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + """ + self.send_add_replication_metrics(replicationMetricList) + self.recv_add_replication_metrics() + + def send_add_replication_metrics(self, replicationMetricList): + self._oprot.writeMessageBegin('add_replication_metrics', TMessageType.CALL, self._seqid) + args = add_replication_metrics_args() + 
args.replicationMetricList = replicationMetricList + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_replication_metrics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_replication_metrics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_replication_metrics(self, rqst): + """ + Parameters: + - rqst + """ + self.send_get_replication_metrics(rqst) + return self.recv_get_replication_metrics() + + def send_get_replication_metrics(self, rqst): + self._oprot.writeMessageBegin('get_replication_metrics', TMessageType.CALL, self._seqid) + args = get_replication_metrics_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_replication_metrics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_replication_metrics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_replication_metrics failed: unknown result") + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -10561,6 +10639,8 @@ def __init__(self, handler): self._processMap["scheduled_query_maintenance"] = Processor.process_scheduled_query_maintenance self._processMap["scheduled_query_progress"] = Processor.process_scheduled_query_progress self._processMap["get_scheduled_query"] = Processor.process_get_scheduled_query + self._processMap["add_replication_metrics"] = 
Processor.process_add_replication_metrics + self._processMap["get_replication_metrics"] = Processor.process_get_replication_metrics def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -16465,6 +16545,50 @@ def process_get_scheduled_query(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_add_replication_metrics(self, seqid, iprot, oprot): + args = add_replication_metrics_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_replication_metrics_result() + try: + self._handler.add_replication_metrics(args.replicationMetricList) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("add_replication_metrics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_replication_metrics(self, seqid, iprot, oprot): + args = get_replication_metrics_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_replication_metrics_result() + try: + result.success = self._handler.get_replication_metrics(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_replication_metrics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -18274,10 +18398,10 @@ def read(self, iprot): if fid == 0: if 
ftype == TType.LIST: self.success = [] - (_etype1127, _size1124) = iprot.readListBegin() - for _i1128 in xrange(_size1124): - _elem1129 = iprot.readString() - self.success.append(_elem1129) + (_etype1134, _size1131) = iprot.readListBegin() + for _i1135 in xrange(_size1131): + _elem1136 = iprot.readString() + self.success.append(_elem1136) iprot.readListEnd() else: iprot.skip(ftype) @@ -18300,8 +18424,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1130 in self.success: - oprot.writeString(iter1130) + for iter1137 in self.success: + oprot.writeString(iter1137) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18406,10 +18530,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1134, _size1131) = iprot.readListBegin() - for _i1135 in xrange(_size1131): - _elem1136 = iprot.readString() - self.success.append(_elem1136) + (_etype1141, _size1138) = iprot.readListBegin() + for _i1142 in xrange(_size1138): + _elem1143 = iprot.readString() + self.success.append(_elem1143) iprot.readListEnd() else: iprot.skip(ftype) @@ -18432,8 +18556,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1137 in self.success: - oprot.writeString(iter1137) + for iter1144 in self.success: + oprot.writeString(iter1144) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19203,12 +19327,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1139, _vtype1140, _size1138 ) = iprot.readMapBegin() - for _i1142 in xrange(_size1138): - _key1143 = iprot.readString() - _val1144 = Type() - _val1144.read(iprot) - self.success[_key1143] = _val1144 + (_ktype1146, _vtype1147, _size1145 ) = iprot.readMapBegin() + for _i1149 in xrange(_size1145): + _key1150 = 
iprot.readString() + _val1151 = Type() + _val1151.read(iprot) + self.success[_key1150] = _val1151 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19231,9 +19355,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter1145,viter1146 in self.success.items(): - oprot.writeString(kiter1145) - viter1146.write(oprot) + for kiter1152,viter1153 in self.success.items(): + oprot.writeString(kiter1152) + viter1153.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -19376,11 +19500,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1150, _size1147) = iprot.readListBegin() - for _i1151 in xrange(_size1147): - _elem1152 = FieldSchema() - _elem1152.read(iprot) - self.success.append(_elem1152) + (_etype1157, _size1154) = iprot.readListBegin() + for _i1158 in xrange(_size1154): + _elem1159 = FieldSchema() + _elem1159.read(iprot) + self.success.append(_elem1159) iprot.readListEnd() else: iprot.skip(ftype) @@ -19415,8 +19539,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1153 in self.success: - iter1153.write(oprot) + for iter1160 in self.success: + iter1160.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19583,11 +19707,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1157, _size1154) = iprot.readListBegin() - for _i1158 in xrange(_size1154): - _elem1159 = FieldSchema() - _elem1159.read(iprot) - self.success.append(_elem1159) + (_etype1164, _size1161) = iprot.readListBegin() + for _i1165 in xrange(_size1161): + _elem1166 = FieldSchema() + _elem1166.read(iprot) + self.success.append(_elem1166) iprot.readListEnd() else: iprot.skip(ftype) @@ -19622,8 +19746,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1160 in self.success: - iter1160.write(oprot) + for iter1167 in self.success: + iter1167.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19949,11 +20073,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1164, _size1161) = iprot.readListBegin() - for _i1165 in xrange(_size1161): - _elem1166 = FieldSchema() - _elem1166.read(iprot) - self.success.append(_elem1166) + (_etype1171, _size1168) = iprot.readListBegin() + for _i1172 in xrange(_size1168): + _elem1173 = FieldSchema() + _elem1173.read(iprot) + self.success.append(_elem1173) iprot.readListEnd() else: iprot.skip(ftype) @@ -19988,8 +20112,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1167 in self.success: - iter1167.write(oprot) + for iter1174 in self.success: + iter1174.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20156,11 +20280,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1171, _size1168) = iprot.readListBegin() - for _i1172 in xrange(_size1168): - _elem1173 = FieldSchema() - _elem1173.read(iprot) - self.success.append(_elem1173) + (_etype1178, _size1175) = iprot.readListBegin() + for _i1179 in xrange(_size1175): + _elem1180 = FieldSchema() + _elem1180.read(iprot) + self.success.append(_elem1180) iprot.readListEnd() else: iprot.skip(ftype) @@ -20195,8 +20319,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1174 in self.success: - iter1174.write(oprot) + for iter1181 in self.success: + iter1181.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: 
@@ -20822,66 +20946,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype1178, _size1175) = iprot.readListBegin() - for _i1179 in xrange(_size1175): - _elem1180 = SQLPrimaryKey() - _elem1180.read(iprot) - self.primaryKeys.append(_elem1180) + (_etype1185, _size1182) = iprot.readListBegin() + for _i1186 in xrange(_size1182): + _elem1187 = SQLPrimaryKey() + _elem1187.read(iprot) + self.primaryKeys.append(_elem1187) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype1184, _size1181) = iprot.readListBegin() - for _i1185 in xrange(_size1181): - _elem1186 = SQLForeignKey() - _elem1186.read(iprot) - self.foreignKeys.append(_elem1186) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = SQLForeignKey() + _elem1193.read(iprot) + self.foreignKeys.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype1190, _size1187) = iprot.readListBegin() - for _i1191 in xrange(_size1187): - _elem1192 = SQLUniqueConstraint() - _elem1192.read(iprot) - self.uniqueConstraints.append(_elem1192) + (_etype1197, _size1194) = iprot.readListBegin() + for _i1198 in xrange(_size1194): + _elem1199 = SQLUniqueConstraint() + _elem1199.read(iprot) + self.uniqueConstraints.append(_elem1199) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype1196, _size1193) = iprot.readListBegin() - for _i1197 in xrange(_size1193): - _elem1198 = SQLNotNullConstraint() - _elem1198.read(iprot) - self.notNullConstraints.append(_elem1198) + (_etype1203, _size1200) = iprot.readListBegin() + for _i1204 in xrange(_size1200): + _elem1205 = SQLNotNullConstraint() + _elem1205.read(iprot) + self.notNullConstraints.append(_elem1205) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: 
self.defaultConstraints = [] - (_etype1202, _size1199) = iprot.readListBegin() - for _i1203 in xrange(_size1199): - _elem1204 = SQLDefaultConstraint() - _elem1204.read(iprot) - self.defaultConstraints.append(_elem1204) + (_etype1209, _size1206) = iprot.readListBegin() + for _i1210 in xrange(_size1206): + _elem1211 = SQLDefaultConstraint() + _elem1211.read(iprot) + self.defaultConstraints.append(_elem1211) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype1208, _size1205) = iprot.readListBegin() - for _i1209 in xrange(_size1205): - _elem1210 = SQLCheckConstraint() - _elem1210.read(iprot) - self.checkConstraints.append(_elem1210) + (_etype1215, _size1212) = iprot.readListBegin() + for _i1216 in xrange(_size1212): + _elem1217 = SQLCheckConstraint() + _elem1217.read(iprot) + self.checkConstraints.append(_elem1217) iprot.readListEnd() else: iprot.skip(ftype) @@ -20902,43 +21026,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter1211 in self.primaryKeys: - iter1211.write(oprot) + for iter1218 in self.primaryKeys: + iter1218.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter1212 in self.foreignKeys: - iter1212.write(oprot) + for iter1219 in self.foreignKeys: + iter1219.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter1213 in self.uniqueConstraints: - iter1213.write(oprot) + for iter1220 in self.uniqueConstraints: + iter1220.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: 
oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter1214 in self.notNullConstraints: - iter1214.write(oprot) + for iter1221 in self.notNullConstraints: + iter1221.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter1215 in self.defaultConstraints: - iter1215.write(oprot) + for iter1222 in self.defaultConstraints: + iter1222.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter1216 in self.checkConstraints: - iter1216.write(oprot) + for iter1223 in self.checkConstraints: + iter1223.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22672,10 +22796,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype1220, _size1217) = iprot.readListBegin() - for _i1221 in xrange(_size1217): - _elem1222 = iprot.readString() - self.partNames.append(_elem1222) + (_etype1227, _size1224) = iprot.readListBegin() + for _i1228 in xrange(_size1224): + _elem1229 = iprot.readString() + self.partNames.append(_elem1229) iprot.readListEnd() else: iprot.skip(ftype) @@ -22700,8 +22824,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter1223 in self.partNames: - oprot.writeString(iter1223) + for iter1230 in self.partNames: + oprot.writeString(iter1230) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23046,10 +23170,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1227, _size1224) = iprot.readListBegin() - for _i1228 
in xrange(_size1224): - _elem1229 = iprot.readString() - self.success.append(_elem1229) + (_etype1234, _size1231) = iprot.readListBegin() + for _i1235 in xrange(_size1231): + _elem1236 = iprot.readString() + self.success.append(_elem1236) iprot.readListEnd() else: iprot.skip(ftype) @@ -23072,8 +23196,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1230 in self.success: - oprot.writeString(iter1230) + for iter1237 in self.success: + oprot.writeString(iter1237) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23223,10 +23347,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1234, _size1231) = iprot.readListBegin() - for _i1235 in xrange(_size1231): - _elem1236 = iprot.readString() - self.success.append(_elem1236) + (_etype1241, _size1238) = iprot.readListBegin() + for _i1242 in xrange(_size1238): + _elem1243 = iprot.readString() + self.success.append(_elem1243) iprot.readListEnd() else: iprot.skip(ftype) @@ -23249,8 +23373,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1237 in self.success: - oprot.writeString(iter1237) + for iter1244 in self.success: + oprot.writeString(iter1244) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23355,11 +23479,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1241, _size1238) = iprot.readListBegin() - for _i1242 in xrange(_size1238): - _elem1243 = Table() - _elem1243.read(iprot) - self.success.append(_elem1243) + (_etype1248, _size1245) = iprot.readListBegin() + for _i1249 in xrange(_size1245): + _elem1250 = Table() + _elem1250.read(iprot) + self.success.append(_elem1250) iprot.readListEnd() else: iprot.skip(ftype) @@ -23382,8 +23506,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1244 in self.success: - iter1244.write(oprot) + for iter1251 in self.success: + iter1251.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23507,10 +23631,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1248, _size1245) = iprot.readListBegin() - for _i1249 in xrange(_size1245): - _elem1250 = iprot.readString() - self.success.append(_elem1250) + (_etype1255, _size1252) = iprot.readListBegin() + for _i1256 in xrange(_size1252): + _elem1257 = iprot.readString() + self.success.append(_elem1257) iprot.readListEnd() else: iprot.skip(ftype) @@ -23533,8 +23657,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1251 in self.success: - oprot.writeString(iter1251) + for iter1258 in self.success: + oprot.writeString(iter1258) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23607,10 +23731,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype1255, _size1252) = iprot.readListBegin() - for _i1256 in xrange(_size1252): - _elem1257 = iprot.readString() - self.tbl_types.append(_elem1257) + (_etype1262, _size1259) = iprot.readListBegin() + for _i1263 in xrange(_size1259): + _elem1264 = iprot.readString() + self.tbl_types.append(_elem1264) iprot.readListEnd() else: iprot.skip(ftype) @@ -23635,8 +23759,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter1258 in self.tbl_types: - oprot.writeString(iter1258) + for iter1265 in self.tbl_types: + oprot.writeString(iter1265) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23692,11 +23816,11 @@ def 
read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1262, _size1259) = iprot.readListBegin() - for _i1263 in xrange(_size1259): - _elem1264 = TableMeta() - _elem1264.read(iprot) - self.success.append(_elem1264) + (_etype1269, _size1266) = iprot.readListBegin() + for _i1270 in xrange(_size1266): + _elem1271 = TableMeta() + _elem1271.read(iprot) + self.success.append(_elem1271) iprot.readListEnd() else: iprot.skip(ftype) @@ -23719,8 +23843,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1265 in self.success: - iter1265.write(oprot) + for iter1272 in self.success: + iter1272.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23844,10 +23968,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1269, _size1266) = iprot.readListBegin() - for _i1270 in xrange(_size1266): - _elem1271 = iprot.readString() - self.success.append(_elem1271) + (_etype1276, _size1273) = iprot.readListBegin() + for _i1277 in xrange(_size1273): + _elem1278 = iprot.readString() + self.success.append(_elem1278) iprot.readListEnd() else: iprot.skip(ftype) @@ -23870,8 +23994,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1272 in self.success: - oprot.writeString(iter1272) + for iter1279 in self.success: + oprot.writeString(iter1279) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24107,10 +24231,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype1276, _size1273) = iprot.readListBegin() - for _i1277 in xrange(_size1273): - _elem1278 = iprot.readString() - self.tbl_names.append(_elem1278) + (_etype1283, _size1280) = iprot.readListBegin() + for _i1284 in xrange(_size1280): + _elem1285 = 
iprot.readString() + self.tbl_names.append(_elem1285) iprot.readListEnd() else: iprot.skip(ftype) @@ -24131,8 +24255,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter1279 in self.tbl_names: - oprot.writeString(iter1279) + for iter1286 in self.tbl_names: + oprot.writeString(iter1286) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24184,11 +24308,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1283, _size1280) = iprot.readListBegin() - for _i1284 in xrange(_size1280): - _elem1285 = Table() - _elem1285.read(iprot) - self.success.append(_elem1285) + (_etype1290, _size1287) = iprot.readListBegin() + for _i1291 in xrange(_size1287): + _elem1292 = Table() + _elem1292.read(iprot) + self.success.append(_elem1292) iprot.readListEnd() else: iprot.skip(ftype) @@ -24205,8 +24329,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1286 in self.success: - iter1286.write(oprot) + for iter1293 in self.success: + iter1293.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24326,11 +24450,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1290, _size1287) = iprot.readListBegin() - for _i1291 in xrange(_size1287): - _elem1292 = ExtendedTableInfo() - _elem1292.read(iprot) - self.success.append(_elem1292) + (_etype1297, _size1294) = iprot.readListBegin() + for _i1298 in xrange(_size1294): + _elem1299 = ExtendedTableInfo() + _elem1299.read(iprot) + self.success.append(_elem1299) iprot.readListEnd() else: iprot.skip(ftype) @@ -24353,8 +24477,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for 
iter1293 in self.success: - iter1293.write(oprot) + for iter1300 in self.success: + iter1300.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25227,10 +25351,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1297, _size1294) = iprot.readListBegin() - for _i1298 in xrange(_size1294): - _elem1299 = iprot.readString() - self.success.append(_elem1299) + (_etype1304, _size1301) = iprot.readListBegin() + for _i1305 in xrange(_size1301): + _elem1306 = iprot.readString() + self.success.append(_elem1306) iprot.readListEnd() else: iprot.skip(ftype) @@ -25265,8 +25389,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1300 in self.success: - oprot.writeString(iter1300) + for iter1307 in self.success: + oprot.writeString(iter1307) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26395,11 +26519,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1304, _size1301) = iprot.readListBegin() - for _i1305 in xrange(_size1301): - _elem1306 = Partition() - _elem1306.read(iprot) - self.new_parts.append(_elem1306) + (_etype1311, _size1308) = iprot.readListBegin() + for _i1312 in xrange(_size1308): + _elem1313 = Partition() + _elem1313.read(iprot) + self.new_parts.append(_elem1313) iprot.readListEnd() else: iprot.skip(ftype) @@ -26416,8 +26540,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1307 in self.new_parts: - iter1307.write(oprot) + for iter1314 in self.new_parts: + iter1314.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26575,11 +26699,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1311, _size1308) = 
iprot.readListBegin() - for _i1312 in xrange(_size1308): - _elem1313 = PartitionSpec() - _elem1313.read(iprot) - self.new_parts.append(_elem1313) + (_etype1318, _size1315) = iprot.readListBegin() + for _i1319 in xrange(_size1315): + _elem1320 = PartitionSpec() + _elem1320.read(iprot) + self.new_parts.append(_elem1320) iprot.readListEnd() else: iprot.skip(ftype) @@ -26596,8 +26720,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1314 in self.new_parts: - iter1314.write(oprot) + for iter1321 in self.new_parts: + iter1321.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26771,10 +26895,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1318, _size1315) = iprot.readListBegin() - for _i1319 in xrange(_size1315): - _elem1320 = iprot.readString() - self.part_vals.append(_elem1320) + (_etype1325, _size1322) = iprot.readListBegin() + for _i1326 in xrange(_size1322): + _elem1327 = iprot.readString() + self.part_vals.append(_elem1327) iprot.readListEnd() else: iprot.skip(ftype) @@ -26799,8 +26923,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1321 in self.part_vals: - oprot.writeString(iter1321) + for iter1328 in self.part_vals: + oprot.writeString(iter1328) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27153,10 +27277,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1325, _size1322) = iprot.readListBegin() - for _i1326 in xrange(_size1322): - _elem1327 = iprot.readString() - self.part_vals.append(_elem1327) + (_etype1332, _size1329) = iprot.readListBegin() + for _i1333 in xrange(_size1329): + _elem1334 = iprot.readString() + self.part_vals.append(_elem1334) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -27187,8 +27311,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1328 in self.part_vals: - oprot.writeString(iter1328) + for iter1335 in self.part_vals: + oprot.writeString(iter1335) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -27783,10 +27907,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1332, _size1329) = iprot.readListBegin() - for _i1333 in xrange(_size1329): - _elem1334 = iprot.readString() - self.part_vals.append(_elem1334) + (_etype1339, _size1336) = iprot.readListBegin() + for _i1340 in xrange(_size1336): + _elem1341 = iprot.readString() + self.part_vals.append(_elem1341) iprot.readListEnd() else: iprot.skip(ftype) @@ -27816,8 +27940,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1335 in self.part_vals: - oprot.writeString(iter1335) + for iter1342 in self.part_vals: + oprot.writeString(iter1342) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -27990,10 +28114,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1339, _size1336) = iprot.readListBegin() - for _i1340 in xrange(_size1336): - _elem1341 = iprot.readString() - self.part_vals.append(_elem1341) + (_etype1346, _size1343) = iprot.readListBegin() + for _i1347 in xrange(_size1343): + _elem1348 = iprot.readString() + self.part_vals.append(_elem1348) iprot.readListEnd() else: iprot.skip(ftype) @@ -28029,8 +28153,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1342 in self.part_vals: - oprot.writeString(iter1342) + for 
iter1349 in self.part_vals: + oprot.writeString(iter1349) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -28767,10 +28891,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1346, _size1343) = iprot.readListBegin() - for _i1347 in xrange(_size1343): - _elem1348 = iprot.readString() - self.part_vals.append(_elem1348) + (_etype1353, _size1350) = iprot.readListBegin() + for _i1354 in xrange(_size1350): + _elem1355 = iprot.readString() + self.part_vals.append(_elem1355) iprot.readListEnd() else: iprot.skip(ftype) @@ -28795,8 +28919,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1349 in self.part_vals: - oprot.writeString(iter1349) + for iter1356 in self.part_vals: + oprot.writeString(iter1356) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29114,11 +29238,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1351, _vtype1352, _size1350 ) = iprot.readMapBegin() - for _i1354 in xrange(_size1350): - _key1355 = iprot.readString() - _val1356 = iprot.readString() - self.partitionSpecs[_key1355] = _val1356 + (_ktype1358, _vtype1359, _size1357 ) = iprot.readMapBegin() + for _i1361 in xrange(_size1357): + _key1362 = iprot.readString() + _val1363 = iprot.readString() + self.partitionSpecs[_key1362] = _val1363 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29155,9 +29279,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1357,viter1358 in self.partitionSpecs.items(): - oprot.writeString(kiter1357) - oprot.writeString(viter1358) + for kiter1364,viter1365 in self.partitionSpecs.items(): + oprot.writeString(kiter1364) + oprot.writeString(viter1365) 
oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -29362,11 +29486,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1360, _vtype1361, _size1359 ) = iprot.readMapBegin() - for _i1363 in xrange(_size1359): - _key1364 = iprot.readString() - _val1365 = iprot.readString() - self.partitionSpecs[_key1364] = _val1365 + (_ktype1367, _vtype1368, _size1366 ) = iprot.readMapBegin() + for _i1370 in xrange(_size1366): + _key1371 = iprot.readString() + _val1372 = iprot.readString() + self.partitionSpecs[_key1371] = _val1372 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29403,9 +29527,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1366,viter1367 in self.partitionSpecs.items(): - oprot.writeString(kiter1366) - oprot.writeString(viter1367) + for kiter1373,viter1374 in self.partitionSpecs.items(): + oprot.writeString(kiter1373) + oprot.writeString(viter1374) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -29488,11 +29612,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1371, _size1368) = iprot.readListBegin() - for _i1372 in xrange(_size1368): - _elem1373 = Partition() - _elem1373.read(iprot) - self.success.append(_elem1373) + (_etype1378, _size1375) = iprot.readListBegin() + for _i1379 in xrange(_size1375): + _elem1380 = Partition() + _elem1380.read(iprot) + self.success.append(_elem1380) iprot.readListEnd() else: iprot.skip(ftype) @@ -29533,8 +29657,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1374 in self.success: - iter1374.write(oprot) + for iter1381 in self.success: + iter1381.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not 
None: @@ -29628,10 +29752,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1378, _size1375) = iprot.readListBegin() - for _i1379 in xrange(_size1375): - _elem1380 = iprot.readString() - self.part_vals.append(_elem1380) + (_etype1385, _size1382) = iprot.readListBegin() + for _i1386 in xrange(_size1382): + _elem1387 = iprot.readString() + self.part_vals.append(_elem1387) iprot.readListEnd() else: iprot.skip(ftype) @@ -29643,10 +29767,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1384, _size1381) = iprot.readListBegin() - for _i1385 in xrange(_size1381): - _elem1386 = iprot.readString() - self.group_names.append(_elem1386) + (_etype1391, _size1388) = iprot.readListBegin() + for _i1392 in xrange(_size1388): + _elem1393 = iprot.readString() + self.group_names.append(_elem1393) iprot.readListEnd() else: iprot.skip(ftype) @@ -29671,8 +29795,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1387 in self.part_vals: - oprot.writeString(iter1387) + for iter1394 in self.part_vals: + oprot.writeString(iter1394) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -29682,8 +29806,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1388 in self.group_names: - oprot.writeString(iter1388) + for iter1395 in self.group_names: + oprot.writeString(iter1395) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30112,11 +30236,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1392, _size1389) = iprot.readListBegin() - for _i1393 in xrange(_size1389): - _elem1394 = Partition() - _elem1394.read(iprot) - self.success.append(_elem1394) + (_etype1399, 
_size1396) = iprot.readListBegin() + for _i1400 in xrange(_size1396): + _elem1401 = Partition() + _elem1401.read(iprot) + self.success.append(_elem1401) iprot.readListEnd() else: iprot.skip(ftype) @@ -30145,8 +30269,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1395 in self.success: - iter1395.write(oprot) + for iter1402 in self.success: + iter1402.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30399,10 +30523,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1399, _size1396) = iprot.readListBegin() - for _i1400 in xrange(_size1396): - _elem1401 = iprot.readString() - self.group_names.append(_elem1401) + (_etype1406, _size1403) = iprot.readListBegin() + for _i1407 in xrange(_size1403): + _elem1408 = iprot.readString() + self.group_names.append(_elem1408) iprot.readListEnd() else: iprot.skip(ftype) @@ -30435,8 +30559,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1402 in self.group_names: - oprot.writeString(iter1402) + for iter1409 in self.group_names: + oprot.writeString(iter1409) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30497,11 +30621,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1406, _size1403) = iprot.readListBegin() - for _i1407 in xrange(_size1403): - _elem1408 = Partition() - _elem1408.read(iprot) - self.success.append(_elem1408) + (_etype1413, _size1410) = iprot.readListBegin() + for _i1414 in xrange(_size1410): + _elem1415 = Partition() + _elem1415.read(iprot) + self.success.append(_elem1415) iprot.readListEnd() else: iprot.skip(ftype) @@ -30530,8 +30654,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1409 in self.success: - iter1409.write(oprot) + for iter1416 in self.success: + iter1416.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30689,11 +30813,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1413, _size1410) = iprot.readListBegin() - for _i1414 in xrange(_size1410): - _elem1415 = PartitionSpec() - _elem1415.read(iprot) - self.success.append(_elem1415) + (_etype1420, _size1417) = iprot.readListBegin() + for _i1421 in xrange(_size1417): + _elem1422 = PartitionSpec() + _elem1422.read(iprot) + self.success.append(_elem1422) iprot.readListEnd() else: iprot.skip(ftype) @@ -30722,8 +30846,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1416 in self.success: - iter1416.write(oprot) + for iter1423 in self.success: + iter1423.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30881,10 +31005,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1420, _size1417) = iprot.readListBegin() - for _i1421 in xrange(_size1417): - _elem1422 = iprot.readString() - self.success.append(_elem1422) + (_etype1427, _size1424) = iprot.readListBegin() + for _i1428 in xrange(_size1424): + _elem1429 = iprot.readString() + self.success.append(_elem1429) iprot.readListEnd() else: iprot.skip(ftype) @@ -30913,8 +31037,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1423 in self.success: - oprot.writeString(iter1423) + for iter1430 in self.success: + oprot.writeString(iter1430) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31154,10 +31278,10 @@ def read(self, iprot): 
elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1427, _size1424) = iprot.readListBegin() - for _i1428 in xrange(_size1424): - _elem1429 = iprot.readString() - self.part_vals.append(_elem1429) + (_etype1434, _size1431) = iprot.readListBegin() + for _i1435 in xrange(_size1431): + _elem1436 = iprot.readString() + self.part_vals.append(_elem1436) iprot.readListEnd() else: iprot.skip(ftype) @@ -31187,8 +31311,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1430 in self.part_vals: - oprot.writeString(iter1430) + for iter1437 in self.part_vals: + oprot.writeString(iter1437) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -31252,11 +31376,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1434, _size1431) = iprot.readListBegin() - for _i1435 in xrange(_size1431): - _elem1436 = Partition() - _elem1436.read(iprot) - self.success.append(_elem1436) + (_etype1441, _size1438) = iprot.readListBegin() + for _i1442 in xrange(_size1438): + _elem1443 = Partition() + _elem1443.read(iprot) + self.success.append(_elem1443) iprot.readListEnd() else: iprot.skip(ftype) @@ -31285,8 +31409,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1437 in self.success: - iter1437.write(oprot) + for iter1444 in self.success: + iter1444.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31373,10 +31497,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1441, _size1438) = iprot.readListBegin() - for _i1442 in xrange(_size1438): - _elem1443 = iprot.readString() - self.part_vals.append(_elem1443) + (_etype1448, _size1445) = iprot.readListBegin() + for _i1449 in xrange(_size1445): + _elem1450 = 
iprot.readString() + self.part_vals.append(_elem1450) iprot.readListEnd() else: iprot.skip(ftype) @@ -31393,10 +31517,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1447, _size1444) = iprot.readListBegin() - for _i1448 in xrange(_size1444): - _elem1449 = iprot.readString() - self.group_names.append(_elem1449) + (_etype1454, _size1451) = iprot.readListBegin() + for _i1455 in xrange(_size1451): + _elem1456 = iprot.readString() + self.group_names.append(_elem1456) iprot.readListEnd() else: iprot.skip(ftype) @@ -31421,8 +31545,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1450 in self.part_vals: - oprot.writeString(iter1450) + for iter1457 in self.part_vals: + oprot.writeString(iter1457) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -31436,8 +31560,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1451 in self.group_names: - oprot.writeString(iter1451) + for iter1458 in self.group_names: + oprot.writeString(iter1458) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31499,11 +31623,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1455, _size1452) = iprot.readListBegin() - for _i1456 in xrange(_size1452): - _elem1457 = Partition() - _elem1457.read(iprot) - self.success.append(_elem1457) + (_etype1462, _size1459) = iprot.readListBegin() + for _i1463 in xrange(_size1459): + _elem1464 = Partition() + _elem1464.read(iprot) + self.success.append(_elem1464) iprot.readListEnd() else: iprot.skip(ftype) @@ -31532,8 +31656,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, 
len(self.success)) - for iter1458 in self.success: - iter1458.write(oprot) + for iter1465 in self.success: + iter1465.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31773,10 +31897,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1462, _size1459) = iprot.readListBegin() - for _i1463 in xrange(_size1459): - _elem1464 = iprot.readString() - self.part_vals.append(_elem1464) + (_etype1469, _size1466) = iprot.readListBegin() + for _i1470 in xrange(_size1466): + _elem1471 = iprot.readString() + self.part_vals.append(_elem1471) iprot.readListEnd() else: iprot.skip(ftype) @@ -31806,8 +31930,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1465 in self.part_vals: - oprot.writeString(iter1465) + for iter1472 in self.part_vals: + oprot.writeString(iter1472) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -31871,10 +31995,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1469, _size1466) = iprot.readListBegin() - for _i1470 in xrange(_size1466): - _elem1471 = iprot.readString() - self.success.append(_elem1471) + (_etype1476, _size1473) = iprot.readListBegin() + for _i1477 in xrange(_size1473): + _elem1478 = iprot.readString() + self.success.append(_elem1478) iprot.readListEnd() else: iprot.skip(ftype) @@ -31903,8 +32027,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1472 in self.success: - oprot.writeString(iter1472) + for iter1479 in self.success: + oprot.writeString(iter1479) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32196,10 +32320,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1476, _size1473) = 
iprot.readListBegin() - for _i1477 in xrange(_size1473): - _elem1478 = iprot.readString() - self.success.append(_elem1478) + (_etype1483, _size1480) = iprot.readListBegin() + for _i1484 in xrange(_size1480): + _elem1485 = iprot.readString() + self.success.append(_elem1485) iprot.readListEnd() else: iprot.skip(ftype) @@ -32228,8 +32352,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1479 in self.success: - oprot.writeString(iter1479) + for iter1486 in self.success: + oprot.writeString(iter1486) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32400,11 +32524,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1483, _size1480) = iprot.readListBegin() - for _i1484 in xrange(_size1480): - _elem1485 = Partition() - _elem1485.read(iprot) - self.success.append(_elem1485) + (_etype1490, _size1487) = iprot.readListBegin() + for _i1491 in xrange(_size1487): + _elem1492 = Partition() + _elem1492.read(iprot) + self.success.append(_elem1492) iprot.readListEnd() else: iprot.skip(ftype) @@ -32433,8 +32557,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1486 in self.success: - iter1486.write(oprot) + for iter1493 in self.success: + iter1493.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32605,11 +32729,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1490, _size1487) = iprot.readListBegin() - for _i1491 in xrange(_size1487): - _elem1492 = PartitionSpec() - _elem1492.read(iprot) - self.success.append(_elem1492) + (_etype1497, _size1494) = iprot.readListBegin() + for _i1498 in xrange(_size1494): + _elem1499 = PartitionSpec() + _elem1499.read(iprot) + self.success.append(_elem1499) iprot.readListEnd() 
else: iprot.skip(ftype) @@ -32638,8 +32762,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1493 in self.success: - iter1493.write(oprot) + for iter1500 in self.success: + iter1500.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33218,10 +33342,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1497, _size1494) = iprot.readListBegin() - for _i1498 in xrange(_size1494): - _elem1499 = iprot.readString() - self.names.append(_elem1499) + (_etype1504, _size1501) = iprot.readListBegin() + for _i1505 in xrange(_size1501): + _elem1506 = iprot.readString() + self.names.append(_elem1506) iprot.readListEnd() else: iprot.skip(ftype) @@ -33246,8 +33370,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1500 in self.names: - oprot.writeString(iter1500) + for iter1507 in self.names: + oprot.writeString(iter1507) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -33306,11 +33430,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1504, _size1501) = iprot.readListBegin() - for _i1505 in xrange(_size1501): - _elem1506 = Partition() - _elem1506.read(iprot) - self.success.append(_elem1506) + (_etype1511, _size1508) = iprot.readListBegin() + for _i1512 in xrange(_size1508): + _elem1513 = Partition() + _elem1513.read(iprot) + self.success.append(_elem1513) iprot.readListEnd() else: iprot.skip(ftype) @@ -33339,8 +33463,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1507 in self.success: - iter1507.write(oprot) + for iter1514 in self.success: + iter1514.write(oprot) oprot.writeListEnd() 
oprot.writeFieldEnd() if self.o1 is not None: @@ -33749,11 +33873,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1511, _size1508) = iprot.readListBegin() - for _i1512 in xrange(_size1508): - _elem1513 = Partition() - _elem1513.read(iprot) - self.new_parts.append(_elem1513) + (_etype1518, _size1515) = iprot.readListBegin() + for _i1519 in xrange(_size1515): + _elem1520 = Partition() + _elem1520.read(iprot) + self.new_parts.append(_elem1520) iprot.readListEnd() else: iprot.skip(ftype) @@ -33778,8 +33902,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1514 in self.new_parts: - iter1514.write(oprot) + for iter1521 in self.new_parts: + iter1521.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -33932,11 +34056,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1518, _size1515) = iprot.readListBegin() - for _i1519 in xrange(_size1515): - _elem1520 = Partition() - _elem1520.read(iprot) - self.new_parts.append(_elem1520) + (_etype1525, _size1522) = iprot.readListBegin() + for _i1526 in xrange(_size1522): + _elem1527 = Partition() + _elem1527.read(iprot) + self.new_parts.append(_elem1527) iprot.readListEnd() else: iprot.skip(ftype) @@ -33967,8 +34091,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1521 in self.new_parts: - iter1521.write(oprot) + for iter1528 in self.new_parts: + iter1528.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -34471,10 +34595,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1525, _size1522) = iprot.readListBegin() - for _i1526 in xrange(_size1522): - _elem1527 = 
iprot.readString() - self.part_vals.append(_elem1527) + (_etype1532, _size1529) = iprot.readListBegin() + for _i1533 in xrange(_size1529): + _elem1534 = iprot.readString() + self.part_vals.append(_elem1534) iprot.readListEnd() else: iprot.skip(ftype) @@ -34505,8 +34629,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1528 in self.part_vals: - oprot.writeString(iter1528) + for iter1535 in self.part_vals: + oprot.writeString(iter1535) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -34807,10 +34931,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1532, _size1529) = iprot.readListBegin() - for _i1533 in xrange(_size1529): - _elem1534 = iprot.readString() - self.part_vals.append(_elem1534) + (_etype1539, _size1536) = iprot.readListBegin() + for _i1540 in xrange(_size1536): + _elem1541 = iprot.readString() + self.part_vals.append(_elem1541) iprot.readListEnd() else: iprot.skip(ftype) @@ -34832,8 +34956,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1535 in self.part_vals: - oprot.writeString(iter1535) + for iter1542 in self.part_vals: + oprot.writeString(iter1542) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -35191,10 +35315,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1539, _size1536) = iprot.readListBegin() - for _i1540 in xrange(_size1536): - _elem1541 = iprot.readString() - self.success.append(_elem1541) + (_etype1546, _size1543) = iprot.readListBegin() + for _i1547 in xrange(_size1543): + _elem1548 = iprot.readString() + self.success.append(_elem1548) iprot.readListEnd() else: iprot.skip(ftype) @@ -35217,8 +35341,8 @@ def write(self, oprot): if self.success 
is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1542 in self.success: - oprot.writeString(iter1542) + for iter1549 in self.success: + oprot.writeString(iter1549) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35342,11 +35466,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1544, _vtype1545, _size1543 ) = iprot.readMapBegin() - for _i1547 in xrange(_size1543): - _key1548 = iprot.readString() - _val1549 = iprot.readString() - self.success[_key1548] = _val1549 + (_ktype1551, _vtype1552, _size1550 ) = iprot.readMapBegin() + for _i1554 in xrange(_size1550): + _key1555 = iprot.readString() + _val1556 = iprot.readString() + self.success[_key1555] = _val1556 iprot.readMapEnd() else: iprot.skip(ftype) @@ -35369,9 +35493,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1550,viter1551 in self.success.items(): - oprot.writeString(kiter1550) - oprot.writeString(viter1551) + for kiter1557,viter1558 in self.success.items(): + oprot.writeString(kiter1557) + oprot.writeString(viter1558) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35447,11 +35571,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1553, _vtype1554, _size1552 ) = iprot.readMapBegin() - for _i1556 in xrange(_size1552): - _key1557 = iprot.readString() - _val1558 = iprot.readString() - self.part_vals[_key1557] = _val1558 + (_ktype1560, _vtype1561, _size1559 ) = iprot.readMapBegin() + for _i1563 in xrange(_size1559): + _key1564 = iprot.readString() + _val1565 = iprot.readString() + self.part_vals[_key1564] = _val1565 iprot.readMapEnd() else: iprot.skip(ftype) @@ -35481,9 +35605,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', 
TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1559,viter1560 in self.part_vals.items(): - oprot.writeString(kiter1559) - oprot.writeString(viter1560) + for kiter1566,viter1567 in self.part_vals.items(): + oprot.writeString(kiter1566) + oprot.writeString(viter1567) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -35697,11 +35821,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1562, _vtype1563, _size1561 ) = iprot.readMapBegin() - for _i1565 in xrange(_size1561): - _key1566 = iprot.readString() - _val1567 = iprot.readString() - self.part_vals[_key1566] = _val1567 + (_ktype1569, _vtype1570, _size1568 ) = iprot.readMapBegin() + for _i1572 in xrange(_size1568): + _key1573 = iprot.readString() + _val1574 = iprot.readString() + self.part_vals[_key1573] = _val1574 iprot.readMapEnd() else: iprot.skip(ftype) @@ -35731,9 +35855,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1568,viter1569 in self.part_vals.items(): - oprot.writeString(kiter1568) - oprot.writeString(viter1569) + for kiter1575,viter1576 in self.part_vals.items(): + oprot.writeString(kiter1575) + oprot.writeString(viter1576) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -39785,10 +39909,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1573, _size1570) = iprot.readListBegin() - for _i1574 in xrange(_size1570): - _elem1575 = iprot.readString() - self.success.append(_elem1575) + (_etype1580, _size1577) = iprot.readListBegin() + for _i1581 in xrange(_size1577): + _elem1582 = iprot.readString() + self.success.append(_elem1582) iprot.readListEnd() else: iprot.skip(ftype) @@ -39811,8 +39935,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', 
TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1576 in self.success: - oprot.writeString(iter1576) + for iter1583 in self.success: + oprot.writeString(iter1583) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -40500,10 +40624,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1580, _size1577) = iprot.readListBegin() - for _i1581 in xrange(_size1577): - _elem1582 = iprot.readString() - self.success.append(_elem1582) + (_etype1587, _size1584) = iprot.readListBegin() + for _i1588 in xrange(_size1584): + _elem1589 = iprot.readString() + self.success.append(_elem1589) iprot.readListEnd() else: iprot.skip(ftype) @@ -40526,8 +40650,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1583 in self.success: - oprot.writeString(iter1583) + for iter1590 in self.success: + oprot.writeString(iter1590) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -41041,11 +41165,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1587, _size1584) = iprot.readListBegin() - for _i1588 in xrange(_size1584): - _elem1589 = Role() - _elem1589.read(iprot) - self.success.append(_elem1589) + (_etype1594, _size1591) = iprot.readListBegin() + for _i1595 in xrange(_size1591): + _elem1596 = Role() + _elem1596.read(iprot) + self.success.append(_elem1596) iprot.readListEnd() else: iprot.skip(ftype) @@ -41068,8 +41192,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1590 in self.success: - iter1590.write(oprot) + for iter1597 in self.success: + iter1597.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -41578,10 +41702,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: 
self.group_names = [] - (_etype1594, _size1591) = iprot.readListBegin() - for _i1595 in xrange(_size1591): - _elem1596 = iprot.readString() - self.group_names.append(_elem1596) + (_etype1601, _size1598) = iprot.readListBegin() + for _i1602 in xrange(_size1598): + _elem1603 = iprot.readString() + self.group_names.append(_elem1603) iprot.readListEnd() else: iprot.skip(ftype) @@ -41606,8 +41730,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1597 in self.group_names: - oprot.writeString(iter1597) + for iter1604 in self.group_names: + oprot.writeString(iter1604) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -41834,11 +41958,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1601, _size1598) = iprot.readListBegin() - for _i1602 in xrange(_size1598): - _elem1603 = HiveObjectPrivilege() - _elem1603.read(iprot) - self.success.append(_elem1603) + (_etype1608, _size1605) = iprot.readListBegin() + for _i1609 in xrange(_size1605): + _elem1610 = HiveObjectPrivilege() + _elem1610.read(iprot) + self.success.append(_elem1610) iprot.readListEnd() else: iprot.skip(ftype) @@ -41861,8 +41985,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1604 in self.success: - iter1604.write(oprot) + for iter1611 in self.success: + iter1611.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -42532,10 +42656,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1608, _size1605) = iprot.readListBegin() - for _i1609 in xrange(_size1605): - _elem1610 = iprot.readString() - self.group_names.append(_elem1610) + (_etype1615, _size1612) = iprot.readListBegin() + for _i1616 in xrange(_size1612): + _elem1617 = 
iprot.readString() + self.group_names.append(_elem1617) iprot.readListEnd() else: iprot.skip(ftype) @@ -42556,8 +42680,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1611 in self.group_names: - oprot.writeString(iter1611) + for iter1618 in self.group_names: + oprot.writeString(iter1618) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -42612,10 +42736,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1615, _size1612) = iprot.readListBegin() - for _i1616 in xrange(_size1612): - _elem1617 = iprot.readString() - self.success.append(_elem1617) + (_etype1622, _size1619) = iprot.readListBegin() + for _i1623 in xrange(_size1619): + _elem1624 = iprot.readString() + self.success.append(_elem1624) iprot.readListEnd() else: iprot.skip(ftype) @@ -42638,8 +42762,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1618 in self.success: - oprot.writeString(iter1618) + for iter1625 in self.success: + oprot.writeString(iter1625) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -43571,10 +43695,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1622, _size1619) = iprot.readListBegin() - for _i1623 in xrange(_size1619): - _elem1624 = iprot.readString() - self.success.append(_elem1624) + (_etype1629, _size1626) = iprot.readListBegin() + for _i1630 in xrange(_size1626): + _elem1631 = iprot.readString() + self.success.append(_elem1631) iprot.readListEnd() else: iprot.skip(ftype) @@ -43591,8 +43715,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1625 in self.success: - oprot.writeString(iter1625) 
+ for iter1632 in self.success: + oprot.writeString(iter1632) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -44119,10 +44243,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1629, _size1626) = iprot.readListBegin() - for _i1630 in xrange(_size1626): - _elem1631 = iprot.readString() - self.success.append(_elem1631) + (_etype1636, _size1633) = iprot.readListBegin() + for _i1637 in xrange(_size1633): + _elem1638 = iprot.readString() + self.success.append(_elem1638) iprot.readListEnd() else: iprot.skip(ftype) @@ -44139,8 +44263,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1632 in self.success: - oprot.writeString(iter1632) + for iter1639 in self.success: + oprot.writeString(iter1639) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -47153,10 +47277,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1636, _size1633) = iprot.readListBegin() - for _i1637 in xrange(_size1633): - _elem1638 = iprot.readString() - self.success.append(_elem1638) + (_etype1643, _size1640) = iprot.readListBegin() + for _i1644 in xrange(_size1640): + _elem1645 = iprot.readString() + self.success.append(_elem1645) iprot.readListEnd() else: iprot.skip(ftype) @@ -47173,8 +47297,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1639 in self.success: - oprot.writeString(iter1639) + for iter1646 in self.success: + oprot.writeString(iter1646) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -53484,11 +53608,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1643, _size1640) = iprot.readListBegin() - for _i1644 in xrange(_size1640): - _elem1645 = SchemaVersion() - 
_elem1645.read(iprot) - self.success.append(_elem1645) + (_etype1650, _size1647) = iprot.readListBegin() + for _i1651 in xrange(_size1647): + _elem1652 = SchemaVersion() + _elem1652.read(iprot) + self.success.append(_elem1652) iprot.readListEnd() else: iprot.skip(ftype) @@ -53517,8 +53641,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1646 in self.success: - iter1646.write(oprot) + for iter1653 in self.success: + iter1653.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -54993,11 +55117,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1650, _size1647) = iprot.readListBegin() - for _i1651 in xrange(_size1647): - _elem1652 = RuntimeStat() - _elem1652.read(iprot) - self.success.append(_elem1652) + (_etype1657, _size1654) = iprot.readListBegin() + for _i1658 in xrange(_size1654): + _elem1659 = RuntimeStat() + _elem1659.read(iprot) + self.success.append(_elem1659) iprot.readListEnd() else: iprot.skip(ftype) @@ -55020,8 +55144,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1653 in self.success: - iter1653.write(oprot) + for iter1660 in self.success: + iter1660.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -55820,3 +55944,280 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +class add_replication_metrics_args: + """ + Attributes: + - replicationMetricList + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'replicationMetricList', (ReplicationMetricList, ReplicationMetricList.thrift_spec), None, ), # 1 + ) + + def __init__(self, replicationMetricList=None,): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.replicationMetricList = ReplicationMetricList() + self.replicationMetricList.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_replication_metrics_args') + if self.replicationMetricList is not None: + oprot.writeFieldBegin('replicationMetricList', TType.STRUCT, 1) + self.replicationMetricList.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.replicationMetricList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_replication_metrics_result: + """ + Attributes: + - o1 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, o1=None,): + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_replication_metrics_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_replication_metrics_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (GetReplicationMetricsRequest, GetReplicationMetricsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, 
self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetReplicationMetricsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_replication_metrics_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_replication_metrics_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (ReplicationMetricList, ReplicationMetricList.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + 
iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ReplicationMetricList() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_replication_metrics_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 59b9a695f5..f8301b48fb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -28153,6 +28153,296 @@ 
def __eq__(self, other): def __ne__(self, other): return not (self == other) +class ReplicationMetrics: + """ + Attributes: + - scheduledExecutionId + - policy + - dumpExecutionId + - metadata + - progress + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'scheduledExecutionId', None, None, ), # 1 + (2, TType.STRING, 'policy', None, None, ), # 2 + (3, TType.I64, 'dumpExecutionId', None, None, ), # 3 + (4, TType.STRING, 'metadata', None, None, ), # 4 + (5, TType.STRING, 'progress', None, None, ), # 5 + ) + + def __init__(self, scheduledExecutionId=None, policy=None, dumpExecutionId=None, metadata=None, progress=None,): + self.scheduledExecutionId = scheduledExecutionId + self.policy = policy + self.dumpExecutionId = dumpExecutionId + self.metadata = metadata + self.progress = progress + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.policy = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.dumpExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.metadata = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.progress = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is 
not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ReplicationMetrics') + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin('scheduledExecutionId', TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.policy is not None: + oprot.writeFieldBegin('policy', TType.STRING, 2) + oprot.writeString(self.policy) + oprot.writeFieldEnd() + if self.dumpExecutionId is not None: + oprot.writeFieldBegin('dumpExecutionId', TType.I64, 3) + oprot.writeI64(self.dumpExecutionId) + oprot.writeFieldEnd() + if self.metadata is not None: + oprot.writeFieldBegin('metadata', TType.STRING, 4) + oprot.writeString(self.metadata) + oprot.writeFieldEnd() + if self.progress is not None: + oprot.writeFieldBegin('progress', TType.STRING, 5) + oprot.writeString(self.progress) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduledExecutionId is None: + raise TProtocol.TProtocolException(message='Required field scheduledExecutionId is unset!') + if self.policy is None: + raise TProtocol.TProtocolException(message='Required field policy is unset!') + if self.dumpExecutionId is None: + raise TProtocol.TProtocolException(message='Required field dumpExecutionId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.scheduledExecutionId) + value = (value * 31) ^ hash(self.policy) + value = (value * 31) ^ hash(self.dumpExecutionId) + value = (value * 31) ^ hash(self.metadata) + value = (value * 31) ^ hash(self.progress) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class 
ReplicationMetricList: + """ + Attributes: + - replicationMetricList + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'replicationMetricList', (TType.STRUCT,(ReplicationMetrics, ReplicationMetrics.thrift_spec)), None, ), # 1 + ) + + def __init__(self, replicationMetricList=None,): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.replicationMetricList = [] + (_etype1127, _size1124) = iprot.readListBegin() + for _i1128 in xrange(_size1124): + _elem1129 = ReplicationMetrics() + _elem1129.read(iprot) + self.replicationMetricList.append(_elem1129) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ReplicationMetricList') + if self.replicationMetricList is not None: + oprot.writeFieldBegin('replicationMetricList', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.replicationMetricList)) + for iter1130 in self.replicationMetricList: + iter1130.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.replicationMetricList is None: + raise TProtocol.TProtocolException(message='Required field replicationMetricList is unset!') + return + + + def 
__hash__(self): + value = 17 + value = (value * 31) ^ hash(self.replicationMetricList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetReplicationMetricsRequest: + """ + Attributes: + - scheduledExecutionId + - policy + - dumpExecutionId + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'scheduledExecutionId', None, None, ), # 1 + (2, TType.STRING, 'policy', None, None, ), # 2 + (3, TType.I64, 'dumpExecutionId', None, None, ), # 3 + ) + + def __init__(self, scheduledExecutionId=None, policy=None, dumpExecutionId=None,): + self.scheduledExecutionId = scheduledExecutionId + self.policy = policy + self.dumpExecutionId = dumpExecutionId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.policy = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.dumpExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, 
(self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetReplicationMetricsRequest') + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin('scheduledExecutionId', TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.policy is not None: + oprot.writeFieldBegin('policy', TType.STRING, 2) + oprot.writeString(self.policy) + oprot.writeFieldEnd() + if self.dumpExecutionId is not None: + oprot.writeFieldBegin('dumpExecutionId', TType.I64, 3) + oprot.writeI64(self.dumpExecutionId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.scheduledExecutionId) + value = (value * 31) ^ hash(self.policy) + value = (value * 31) ^ hash(self.dumpExecutionId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class MetaException(TException): """ Attributes: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index 73303206f7..87c42ba79d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -6250,6 +6250,70 @@ class GetPartitionsPsWithAuthResponse ::Thrift::Struct.generate_accessors self end +class ReplicationMetrics + include ::Thrift::Struct, ::Thrift::Struct_Union + SCHEDULEDEXECUTIONID = 1 + POLICY = 2 + DUMPEXECUTIONID = 3 + METADATA = 4 + PROGRESS = 5 + + FIELDS = { + SCHEDULEDEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 
'scheduledExecutionId'}, + POLICY => {:type => ::Thrift::Types::STRING, :name => 'policy'}, + DUMPEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 'dumpExecutionId'}, + METADATA => {:type => ::Thrift::Types::STRING, :name => 'metadata', :optional => true}, + PROGRESS => {:type => ::Thrift::Types::STRING, :name => 'progress', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field scheduledExecutionId is unset!') unless @scheduledExecutionId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field policy is unset!') unless @policy + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dumpExecutionId is unset!') unless @dumpExecutionId + end + + ::Thrift::Struct.generate_accessors self +end + +class ReplicationMetricList + include ::Thrift::Struct, ::Thrift::Struct_Union + REPLICATIONMETRICLIST = 1 + + FIELDS = { + REPLICATIONMETRICLIST => {:type => ::Thrift::Types::LIST, :name => 'replicationMetricList', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ReplicationMetrics}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field replicationMetricList is unset!') unless @replicationMetricList + end + + ::Thrift::Struct.generate_accessors self +end + +class GetReplicationMetricsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + SCHEDULEDEXECUTIONID = 1 + POLICY = 2 + DUMPEXECUTIONID = 3 + + FIELDS = { + SCHEDULEDEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 'scheduledExecutionId', :optional => true}, + POLICY => {:type => ::Thrift::Types::STRING, :name => 'policy', :optional => true}, + DUMPEXECUTIONID => {:type => ::Thrift::Types::I64, :name => 'dumpExecutionId', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + end + + 
::Thrift::Struct.generate_accessors self +end + class MetaException < ::Thrift::Exception include ::Thrift::Struct, ::Thrift::Struct_Union def initialize(message=nil) diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index b3f1df43a5..79dd849276 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -3985,6 +3985,37 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_scheduled_query failed: unknown result') end + def add_replication_metrics(replicationMetricList) + send_add_replication_metrics(replicationMetricList) + recv_add_replication_metrics() + end + + def send_add_replication_metrics(replicationMetricList) + send_message('add_replication_metrics', Add_replication_metrics_args, :replicationMetricList => replicationMetricList) + end + + def recv_add_replication_metrics() + result = receive_message(Add_replication_metrics_result) + raise result.o1 unless result.o1.nil? + return + end + + def get_replication_metrics(rqst) + send_get_replication_metrics(rqst) + return recv_get_replication_metrics() + end + + def send_get_replication_metrics(rqst) + send_message('get_replication_metrics', Get_replication_metrics_args, :rqst => rqst) + end + + def recv_get_replication_metrics() + result = receive_message(Get_replication_metrics_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_replication_metrics failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -6975,6 +7006,28 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_scheduled_query', seqid) end + def process_add_replication_metrics(seqid, iprot, oprot) + args = read_args(iprot, Add_replication_metrics_args) + result = Add_replication_metrics_result.new() + begin + @handler.add_replication_metrics(args.replicationMetricList) + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'add_replication_metrics', seqid) + end + + def process_get_replication_metrics(seqid, iprot, oprot) + args = read_args(iprot, Get_replication_metrics_args) + result = Get_replication_metrics_result.new() + begin + result.success = @handler.get_replication_metrics(args.rqst) + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'get_replication_metrics', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -15775,5 +15828,71 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Add_replication_metrics_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REPLICATIONMETRICLIST = 1 + + FIELDS = { + REPLICATIONMETRICLIST => {:type => ::Thrift::Types::STRUCT, :name => 'replicationMetricList', :class => ::ReplicationMetricList} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Add_replication_metrics_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_replication_metrics_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => 
::Thrift::Types::STRUCT, :name => 'rqst', :class => ::GetReplicationMetricsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_replication_metrics_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::ReplicationMetricList}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 8d83dbdb38..71af79370d 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -4270,6 +4270,17 @@ public void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request) client.scheduled_query_maintenance(request); } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException { + client.add_replication_metrics(replicationMetricList); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest + replicationMetricsRequest) throws MetaException, TException { + return client.get_replication_metrics(replicationMetricsRequest); + } + /** * Builder for requiredFields bitmask to be sent via GetTablesExtRequest */ diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 63b8a73989..a8b1023b82 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -4053,4 +4053,13 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam */ void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TException; + /** + * Adds replication metrics for the replication policies. + * @param replicationMetricList + * @throws MetaException + */ + void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException; + + ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest + replicationMetricsRequest) throws MetaException, TException; } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index d1db106270..db1017beda 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -963,6 +963,17 @@ public static ConfVars getMetaConf(String name) { "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/, "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." + "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), + REPL_METRICS_CACHE_MAXSIZE("metastore.repl.metrics.cache.maxsize", + "hive.repl.metrics.cache.maxsize", 10000 /*10000 rows */, + "Maximum in memory cache size to collect replication metrics. 
The metrics will be pushed to persistent" + + " storage at a frequency defined by config hive.repl.metrics.update.frequency. Till metrics are persisted to" + + " db, it will be stored in this cache. So set this property based on number of concurrent policies running " + + " and the frequency of persisting the metrics to persistent storage. " + ), + REPL_METRICS_UPDATE_FREQUENCY("metastore.repl.metrics.update.frequency", + "hive.repl.metrics.update.frequency", 1 /*1 minute */, + "Frequency at which replication Metrics will be stored in persistent storage. " + ), SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class", "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo", "Fully qualified class name for the metastore schema information class \n" diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 6d4eb16611..b39d87727d 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -2032,6 +2032,24 @@ struct GetPartitionsPsWithAuthResponse { 1: required list partitions } +struct ReplicationMetrics{ + 1: required i64 scheduledExecutionId, + 2: required string policy, + 3: required i64 dumpExecutionId, + 4: optional string metadata, + 5: optional string progress, +} + +struct ReplicationMetricList{ + 1: required list replicationMetricList, +} + +struct GetReplicationMetricsRequest { + 1: optional i64 scheduledExecutionId, + 2: optional string policy, + 3: optional i64 dumpExecutionId +} + // Exceptions. 
exception MetaException { @@ -2760,6 +2778,9 @@ PartitionsResponse get_partitions_req(1:PartitionsRequest req) void scheduled_query_maintenance(1: ScheduledQueryMaintenanceRequest request) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:AlreadyExistsException o3, 4:InvalidInputException o4) void scheduled_query_progress(1: ScheduledQueryProgressInfo info) throws(1:MetaException o1, 2: InvalidOperationException o2) ScheduledQuery get_scheduled_query(1: ScheduledQueryKey scheduleKey) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + void add_replication_metrics(1: ReplicationMetricList replicationMetricList) throws(1:MetaException o1) + ReplicationMetricList get_replication_metrics(1: GetReplicationMetricsRequest rqst) throws(1:MetaException o1) } // * Note about the DDL_TIME: When creating or altering a table or a partition, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index f4f54581d1..4f9af1d42e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -23,7 +23,6 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; -import static org.apache.hadoop.hive.metastore.api.FireEventRequestData._Fields.INSERT_DATA; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; @@ -89,7 +88,6 @@ import 
org.apache.hadoop.hive.common.ZKDeRegisterWatcher; import org.apache.hadoop.hive.common.repl.ReplConst; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.metastore.api.FireEventRequestData._Fields; import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.events.AcidWriteEvent; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -9990,6 +9988,37 @@ public ScheduledQuery get_scheduled_query(ScheduledQueryKey scheduleKey) throws endFunction("get_scheduled_query", ex == null, ex); } } + + @Override + public void add_replication_metrics(ReplicationMetricList replicationMetricList) throws MetaException{ + startFunction("add_replication_metrics"); + Exception ex = null; + try { + getMS().addReplicationMetrics(replicationMetricList); + } catch (Exception e) { + LOG.error("Caught exception", e); + ex = e; + throw e; + } finally { + endFunction("add_replication_metrics", ex == null, ex); + } + } + + @Override + public ReplicationMetricList get_replication_metrics(GetReplicationMetricsRequest + getReplicationMetricsRequest) throws MetaException{ + startFunction("get_replication_metrics"); + Exception ex = null; + try { + return getMS().getReplicationMetrics(getReplicationMetricsRequest); + } catch (Exception e) { + LOG.error("Caught exception", e); + ex = e; + throw e; + } finally { + endFunction("get_replication_metrics", ex == null, ex); + } + } } private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 8fd49827b9..43f28ae498 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -125,6 +125,9 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; +import org.apache.hadoop.hive.metastore.api.ReplicationMetrics; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -213,6 +216,7 @@ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan; import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status; import org.apache.hadoop.hive.metastore.model.MWMTrigger; +import org.apache.hadoop.hive.metastore.model.MReplicationMetrics; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -13100,6 +13104,97 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws Inval } } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) { + boolean commited = false; + try { + openTransaction(); + List mReplicationMetricsList = new ArrayList<>(); + for (ReplicationMetrics replicationMetric : replicationMetricList.getReplicationMetricList()) { + MReplicationMetrics mReplicationMetrics; + try { + mReplicationMetrics = pm.getObjectById(MReplicationMetrics.class, + replicationMetric.getScheduledExecutionId()); + } catch (JDOObjectNotFoundException e) { + mReplicationMetrics = new MReplicationMetrics(); + mReplicationMetrics.setDumpExecutionId(replicationMetric.getDumpExecutionId()); + 
mReplicationMetrics.setScheduledExecutionId(replicationMetric.getScheduledExecutionId()); + mReplicationMetrics.setPolicy(replicationMetric.getPolicy()); + } + if (!StringUtils.isEmpty(replicationMetric.getMetadata())) { + mReplicationMetrics.setMetadata(replicationMetric.getMetadata()); + } + if (!StringUtils.isEmpty(replicationMetric.getProgress())) { + mReplicationMetrics.setProgress(replicationMetric.getProgress()); + } + mReplicationMetricsList.add(mReplicationMetrics); + } + pm.makePersistentAll(mReplicationMetricsList); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest) { + boolean committed = false; + try { + openTransaction(); + ReplicationMetricList replicationMetrics = null; + if (replicationMetricsRequest.isSetPolicy()) { + replicationMetrics = getMReplicationMetrics(replicationMetricsRequest.getPolicy()); + } else if (replicationMetricsRequest.isSetScheduledExecutionId()) { + replicationMetrics = getMReplicationMetrics(replicationMetricsRequest.getScheduledExecutionId()); + } + committed = commitTransaction(); + return replicationMetrics; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + private ReplicationMetricList getMReplicationMetrics(String policy) { + ReplicationMetricList ret = new ReplicationMetricList(); + if (StringUtils.isEmpty(policy)) { + return ret; + } + Query query = pm.newQuery(MReplicationMetrics.class, "policy == policyParam"); + query.declareParameters("java.lang.String policyParam"); + query.setOrdering("scheduledExecutionId descending"); + List list = (List) query.execute(policy); + List returnList = new ArrayList<>(); + for (MReplicationMetrics mReplicationMetric : list) { + pm.retrieve(mReplicationMetric); + returnList.add(MReplicationMetrics.toThrift(mReplicationMetric)); + } + ret.setReplicationMetricList(returnList); + return 
ret; + } + + private ReplicationMetricList getMReplicationMetrics(long scheduledExecutionId) { + ReplicationMetricList ret = new ReplicationMetricList(); + if (scheduledExecutionId < 0) { + return ret; + } + Query query = pm.newQuery(MReplicationMetrics.class, + "scheduledExecutionId == scheduledExecutionIdParam"); + query.declareParameters("java.lang.Long scheduledExecutionIdParam"); + query.setOrdering("scheduledExecutionId descending"); + List list = (List) query.execute(scheduledExecutionId); + List returnList = new ArrayList<>(); + for (MReplicationMetrics mReplicationMetric : list) { + pm.retrieve(mReplicationMetric); + returnList.add(MReplicationMetrics.toThrift(mReplicationMetric)); + } + ret.setReplicationMetricList(returnList); + return ret; + } + private void ensureScheduledQueriesEnabled() throws MetaException { if (!MetastoreConf.getBoolVar(conf, ConfVars.SCHEDULED_QUERIES_ENABLED)) { throw new MetaException( diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 8b66a46b6f..6534750855 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -36,7 +36,6 @@ import org.apache.thrift.TException; public interface RawStore extends Configurable { - /*** * Annotation to skip retries */ @@ -1569,7 +1568,7 @@ void dropConstraint(String catName, String dbName, String tableName, String cons List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException; /** - * Add default values to a table definition + * Add default values to a table definition. * @param dv list of default values * @return constraint names * @throws InvalidObjectException the specification is malformed. 
@@ -1579,7 +1578,7 @@ void dropConstraint(String catName, String dbName, String tableName, String cons throws InvalidObjectException, MetaException; /** - * Add check constraints to a table + * Add check constraints to a table. * @param cc check constraints to add * @return list of constraint names * @throws InvalidObjectException the specification is malformed @@ -1588,7 +1587,7 @@ void dropConstraint(String catName, String dbName, String tableName, String cons List addCheckConstraints(List cc) throws InvalidObjectException, MetaException; /** - * Gets the unique id of the backing datastore for the metadata + * Gets the unique id of the backing datastore for the metadata. * @return * @throws MetaException */ @@ -1659,7 +1658,7 @@ void createISchema(ISchema schema) throws AlreadyExistsException, MetaException, NoSuchObjectException; /** - * Alter an existing ISchema. This assumes the caller has already checked that such a schema + * Alter an existing ISchema. This method assumes the caller has already checked that such a schema * exists. * @param schemaName name of the schema * @param newSchema new schema object @@ -1677,7 +1676,7 @@ void createISchema(ISchema schema) throws AlreadyExistsException, MetaException, ISchema getISchema(ISchemaName schemaName) throws MetaException; /** - * Drop an ISchema. This does not check whether there are valid versions of the schema in + * Drop an ISchema. + * This does not check whether there are valid versions of the schema in * existence, it assumes the caller has already done that. * @param schemaName schema descriptor * @throws NoSuchObjectException no schema of this name exists @@ -1698,7 +1698,8 @@ void addSchemaVersion(SchemaVersion schemaVersion) throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException; /** - * Alter a schema version. 
+ * Note that the Thrift interface only supports changing the serde * mapping and states. This method does not guarantee it will check anymore than that. This * method does not understand the state transitions and just assumes that the new state it is * passed is reasonable. @@ -1727,7 +1728,7 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException; /** - * Get all of the versions of a schema + * Get all of the versions of a schema. * @param schemaName name of the schema * @return all versions of the schema * @throws MetaException general database exception @@ -1735,7 +1736,8 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio List getAllSchemaVersion(ISchemaName schemaName) throws MetaException; /** - * Find all SchemaVersion objects that match a query. The query will select all SchemaVersions + * Find all SchemaVersion objects that match a query. + * The query will select all SchemaVersions * that are equal to all of the non-null passed in arguments. That is, if arguments * colName='name', colNamespace=null, type='string' are passed in, then all schemas that have * a column with colName 'name' and type 'string' will be returned. @@ -1762,7 +1764,7 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, MetaException; /** - * Get serde information + * Get serde information. * @param serDeName name of the SerDe * @return the SerDe, or null if there is no such serde * @throws NoSuchObjectException no serde with this name exists @@ -1771,7 +1773,7 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException; /** - * Add a serde + * Add a serde. 
* @param serde serde to add * @throws AlreadyExistsException a serde of this name already exists * @throws MetaException general database exception @@ -1843,6 +1845,18 @@ void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request) void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws MetaException, NoSuchObjectException, InvalidOperationException; + /** + * Add the replication metrics and progress info. + * @param replicationMetricList + */ + void addReplicationMetrics(ReplicationMetricList replicationMetricList); + + /** + * Gets the replication metrics and progress info. + * @param replicationMetricsRequest + */ + ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest); + int deleteScheduledExecutions(int maxRetainSecs); int markScheduledExecutionsTimedOut(int timeoutSecs) throws InvalidOperationException, MetaException; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 8d28c1fd5a..c8e230b62f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -2889,6 +2889,16 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) rawStore.scheduledQueryProgress(info); } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) { + rawStore.addReplicationMetrics(replicationMetricList); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest) { + return rawStore.getReplicationMetrics(replicationMetricsRequest); + } + @Override public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws MetaException, 
NoSuchObjectException { return rawStore.getScheduledQuery(scheduleKey); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java new file mode 100644 index 0000000000..1464128cd9 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MReplicationMetrics.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +import org.apache.hadoop.hive.metastore.api.ReplicationMetrics; +import org.apache.hadoop.hive.metastore.api.RuntimeStat; + +/** + * Describes Replication Metrics. 
+ */ +public class MReplicationMetrics { + private long scheduledExecutionId; + private String policy; + private long dumpExecutionId; + private String metadata; + private String progress; + + public MReplicationMetrics() { + } + + public static ReplicationMetrics toThrift(MReplicationMetrics mReplicationMetric) { + ReplicationMetrics ret = new ReplicationMetrics(); + ret.setScheduledExecutionId(mReplicationMetric.scheduledExecutionId); + ret.setPolicy(mReplicationMetric.policy); + ret.setMetadata(mReplicationMetric.metadata); + ret.setProgress(mReplicationMetric.progress); + ret.setDumpExecutionId(mReplicationMetric.dumpExecutionId); + return ret; + } + + public long getScheduledExecutionId() { + return scheduledExecutionId; + } + + public void setScheduledExecutionId(long scheduledExecutionId) { + this.scheduledExecutionId = scheduledExecutionId; + } + + public String getPolicy() { + return policy; + } + + public void setPolicy(String policy) { + this.policy = policy; + } + + public long getDumpExecutionId() { + return dumpExecutionId; + } + + public void setDumpExecutionId(long dumpExecutionId) { + this.dumpExecutionId = dumpExecutionId; + } + + public String getMetadata() { + return metadata; + } + + public void setMetadata(String metadata) { + this.metadata = metadata; + } + + public String getProgress() { + return progress; + } + + public void setProgress(String progress) { + this.progress = progress; + } +} diff --git a/standalone-metastore/metastore-server/src/main/resources/package.jdo b/standalone-metastore/metastore-server/src/main/resources/package.jdo index b85631603e..626807e8c7 100644 --- a/standalone-metastore/metastore-server/src/main/resources/package.jdo +++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo @@ -1523,6 +1523,29 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql 
b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index d548c9094f..cb85b14ca7 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -770,6 +770,19 @@ CREATE INDEX LASTUPDATETIMEINDEX ON APP.SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME); CREATE INDEX SCHEDULED_EXECUTIONS_SCHQID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); CREATE UNIQUE INDEX SCHEDULED_EXECUTIONS_UNIQUE_ID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); +--HIVE-23516 +CREATE TABLE "APP"."REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql index db6cebc277..c4be5897f9 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql @@ -83,5 +83,18 @@ ALTER TABLE TXNS DROP COLUMN TXN_ID_TMP; RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL; RENAME COLUMN TXN_LOCK_TBL.NTXN_NEXT TO TXN_LOCK; +--HIVE-23516 +CREATE TABLE "APP"."REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- This needs to be the last thing done. Insert any changes above this line. 
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 6906bdf6b9..770c617a32 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -1318,6 +1318,20 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" ( CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); +--HIVE-23516 +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(max), + "RM_PROGRESS" varchar(max), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql index 77098142d1..60b89022e1 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql @@ -119,6 +119,20 @@ Exec sp_rename 'TXN_LOCK_TBL.NTXN_NEXT', 'TXN_LOCK', 'COLUMN'; ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID); CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); +--HIVE-23516 +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(max), + "RM_PROGRESS" varchar(max), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index b7f423c326..ed8da744cd 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1242,6 +1242,20 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON SCHEDULED_EXECUTIONS ( CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); +-- Create table replication metrics +CREATE TABLE REPLICATION_METRICS ( + RM_SCHEDULED_EXECUTION_ID bigint NOT NULL, + RM_POLICY varchar(256) NOT NULL, + RM_DUMP_EXECUTION_ID bigint NOT NULL, + RM_METADATA varchar(4000), + RM_PROGRESS varchar(4000), + PRIMARY KEY(RM_SCHEDULED_EXECUTION_ID) +); + +-- Create indexes for the replication metrics table +CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY); +CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMP_EXECUTION_ID); + -- ----------------------------------------------------------------- -- Record schema version.
Should be the last step in the init script diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql index 02920666f8..5b55931747 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql @@ -90,6 +90,20 @@ DEALLOCATE PREPARE stmt; RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL; ALTER TABLE TXN_LOCK_TBL RENAME COLUMN NTXN_NEXT TO TXN_LOCK; +-- Create table replication metrics +CREATE TABLE REPLICATION_METRICS ( + RM_SCHEDULED_EXECUTION_ID bigint NOT NULL, + RM_POLICY varchar(256) NOT NULL, + RM_DUMP_EXECUTION_ID bigint NOT NULL, + RM_METADATA varchar(4000), + RM_PROGRESS varchar(4000), + PRIMARY KEY(RM_SCHEDULED_EXECUTION_ID) +); + +-- Create indexes for the replication metrics table +CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY); +CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMP_EXECUTION_ID); + -- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index 0082dcd06c..2971bd56a3 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -1216,6 +1216,20 @@ CREATE TABLE "SCHEDULED_EXECUTIONS" ( CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" number NOT NULL, + "RM_POLICY" varchar2(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" number NOT NULL, + "RM_METADATA" varchar2(4000), + "RM_PROGRESS" varchar2(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql index fb28290156..97a12a2c32 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql @@ -88,6 +88,20 @@ ALTER TABLE TXNS MODIFY TXN_ID default TXNS_TXN_ID_SEQ.nextval; RENAME TABLE NEXT_TXN_ID TO TXN_LOCK_TBL; ALTER TABLE TXN_LOCK_TBL RENAME COLUMN NTXN_NEXT TO TXN_LOCK; +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" number NOT NULL, + "RM_POLICY" varchar2(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" number NOT NULL, + "RM_METADATA" varchar2(4000), + "RM_PROGRESS" varchar2(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index 717e707407..18ac4cbc97 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1904,6 +1904,20 @@ CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON "SCHEDULED_EXECUTIONS" CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_EXECUTION_ID"); +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql index ca799bae08..63b504ea7f 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql @@ -213,6 +213,21 @@ ALTER TABLE "TXNS" ALTER "TXN_ID" SET DEFAULT nextval('"TXNS_TXN_ID_SEQ"'); ALTER TABLE "NEXT_TXN_ID" RENAME TO "TXN_LOCK_TBL"; ALTER TABLE "TXN_LOCK_TBL" RENAME COLUMN "NTXN_NEXT" TO "TXN_LOCK"; + +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(4000), + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + -- These lines need to be last. Insert any changes above. 
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0'; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index e31dc064c1..d7b2d65622 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -66,6 +66,8 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -1326,6 +1328,16 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws Inval objectStore.scheduledQueryProgress(info); } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) { + objectStore.addReplicationMetrics(replicationMetricList); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest) { + return objectStore.getReplicationMetrics(replicationMetricsRequest); + } + @Override public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) throws NoSuchObjectException { return objectStore.getScheduledQuery(scheduleKey); diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 89acdcc55b..19586075ae 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -69,6 +69,8 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -1309,6 +1311,16 @@ public void scheduledQueryMaintenance(ScheduledQueryMaintenanceRequest request) public void scheduledQueryProgress(ScheduledQueryProgressInfo info) { } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) { + throw new RuntimeException("unimplemented"); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest replicationMetricsRequest) { + throw new RuntimeException("unimplemented"); + } + @Override public ScheduledQuery getScheduledQuery(ScheduledQueryKey scheduleKey) { throw new RuntimeException("unimplemented"); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 81bc43d1bc..218ea44335 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -3781,6 +3781,17 @@ public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws TExce client.scheduled_query_progress(info); } + @Override + public void addReplicationMetrics(ReplicationMetricList replicationMetricList) throws MetaException, TException { + client.add_replication_metrics(replicationMetricList); + } + + @Override + public ReplicationMetricList getReplicationMetrics(GetReplicationMetricsRequest + replicationMetricsRequest) throws MetaException, TException { + return client.get_replication_metrics(replicationMetricsRequest); + } + @Override public ScheduledQueryPollResponse scheduledQueryPoll(ScheduledQueryPollRequest request) throws MetaException, TException { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestReplicationMetrics.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestReplicationMetrics.java new file mode 100644 index 0000000000..73c142d923 --- /dev/null +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestReplicationMetrics.java @@ -0,0 +1,265 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; +import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; +import org.apache.hadoop.hive.metastore.api.ReplicationMetrics; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * TestReplicationMetrics for testing replication metrics + * persistence to HMS + */ +@RunWith(Parameterized.class) +@Category(MetastoreUnitTest.class) +public class TestReplicationMetrics extends MetaStoreClientTest { + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + + public TestReplicationMetrics(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + } + + @Before + public void setUp() throws Exception { + client = metaStore.getClient(); + } + + @After + public void tearDown() throws Exception { + try { + client.close(); + } catch (Exception e) { + // HIVE-19729: Shallow the exceptions based on the discussion in the Jira + } + client = null; + } + + @Test + public void testAddMetrics() throws Exception { + ObjectStore objStore = new 
ObjectStore(); + objStore.setConf(metaStore.getConf()); + objStore.deleteRuntimeStats(0); + ReplicationMetricList replicationMetricList = new ReplicationMetricList(); + List replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl1", 1L)); + replicationMetrics.add(createReplicationMetric("repl1", 2L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl2", 3L)); + replicationMetrics.add(createReplicationMetric("repl2", 4L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + + GetReplicationMetricsRequest getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + getReplicationMetricsRequest.setPolicy("repl1"); + ReplicationMetricList actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(2, actualList.getReplicationMetricListSize()); + List actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + ReplicationMetrics actualMetric0 = actualMetrics.get(0); + assertEquals("repl1", actualMetric0.getPolicy()); + assertEquals(2L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress", actualMetric0.getProgress()); + ReplicationMetrics actualMetric1 = actualMetrics.get(1); + assertEquals("repl1", actualMetric1.getPolicy()); + assertEquals(1L, actualMetric1.getScheduledExecutionId()); + assertEquals(1, actualMetric1.getDumpExecutionId()); + assertEquals("metadata", actualMetric1.getMetadata()); + assertEquals("progress", actualMetric1.getProgress()); + + getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + 
getReplicationMetricsRequest.setPolicy("repl2"); + actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(2, actualList.getReplicationMetricListSize()); + actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + actualMetric0 = actualMetrics.get(0); + assertEquals("repl2", actualMetric0.getPolicy()); + assertEquals(4L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress", actualMetric0.getProgress()); + actualMetric1 = actualMetrics.get(1); + assertEquals("repl2", actualMetric1.getPolicy()); + assertEquals(3L, actualMetric1.getScheduledExecutionId()); + assertEquals(1, actualMetric1.getDumpExecutionId()); + assertEquals("metadata", actualMetric1.getMetadata()); + assertEquals("progress", actualMetric1.getProgress()); + } + + @Test + public void testUpdateMetrics() throws Exception { + ObjectStore objStore = new ObjectStore(); + objStore.setConf(metaStore.getConf()); + objStore.deleteRuntimeStats(0); + ReplicationMetricList replicationMetricList = new ReplicationMetricList(); + List replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl1", 1L)); + replicationMetrics.add(createReplicationMetric("repl1", 2L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl2", 3L)); + replicationMetrics.add(createReplicationMetric("repl2", 4L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + replicationMetrics = new ArrayList<>(); + replicationMetrics.add(updateReplicationMetric("repl2", 3L, "progress1")); + 
replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + + GetReplicationMetricsRequest getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + getReplicationMetricsRequest.setPolicy("repl1"); + ReplicationMetricList actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(2, actualList.getReplicationMetricListSize()); + List actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + ReplicationMetrics actualMetric0 = actualMetrics.get(0); + assertEquals("repl1", actualMetric0.getPolicy()); + assertEquals(2L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress", actualMetric0.getProgress()); + + ReplicationMetrics actualMetric1 = actualMetrics.get(1); + assertEquals("repl1", actualMetric1.getPolicy()); + assertEquals(1L, actualMetric1.getScheduledExecutionId()); + assertEquals(1, actualMetric1.getDumpExecutionId()); + assertEquals("metadata", actualMetric1.getMetadata()); + assertEquals("progress", actualMetric1.getProgress()); + + getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + getReplicationMetricsRequest.setPolicy("repl2"); + actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(2, actualList.getReplicationMetricListSize()); + actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + actualMetric0 = actualMetrics.get(0); + assertEquals("repl2", actualMetric0.getPolicy()); + assertEquals(4L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress", actualMetric0.getProgress()); + + actualMetric1 = actualMetrics.get(1); + assertEquals("repl2", 
actualMetric1.getPolicy()); + assertEquals(3L, actualMetric1.getScheduledExecutionId()); + assertEquals(1, actualMetric1.getDumpExecutionId()); + assertEquals("metadata", actualMetric1.getMetadata()); + assertEquals("progress1", actualMetric1.getProgress()); + } + + @Test + public void testGetMetricsByScheduleId() throws Exception { + ObjectStore objStore = new ObjectStore(); + objStore.setConf(metaStore.getConf()); + objStore.deleteRuntimeStats(0); + ReplicationMetricList replicationMetricList = new ReplicationMetricList(); + List replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl1", 1L)); + replicationMetrics.add(createReplicationMetric("repl1", 2L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + replicationMetrics = new ArrayList<>(); + replicationMetrics.add(createReplicationMetric("repl2", 3L)); + replicationMetrics.add(createReplicationMetric("repl2", 4L)); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + + GetReplicationMetricsRequest getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + getReplicationMetricsRequest.setScheduledExecutionId(1L); + ReplicationMetricList actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(1, actualList.getReplicationMetricListSize()); + List actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + ReplicationMetrics actualMetric0 = actualMetrics.get(0); + assertEquals("repl1", actualMetric0.getPolicy()); + assertEquals(1L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress", actualMetric0.getProgress()); + + //Update progress + replicationMetrics = new ArrayList<>(); + 
replicationMetrics.add(updateReplicationMetric("repl1", 1L, "progress1")); + replicationMetricList.setReplicationMetricList(replicationMetrics); + objStore.addReplicationMetrics(replicationMetricList); + Thread.sleep(1000); + + //get the metrics again + getReplicationMetricsRequest = new GetReplicationMetricsRequest(); + getReplicationMetricsRequest.setScheduledExecutionId(1L); + actualList = client.getReplicationMetrics(getReplicationMetricsRequest); + assertEquals(1, actualList.getReplicationMetricListSize()); + actualMetrics = actualList.getReplicationMetricList(); + //Ordering should be descending + actualMetric0 = actualMetrics.get(0); + assertEquals("repl1", actualMetric0.getPolicy()); + assertEquals(1L, actualMetric0.getScheduledExecutionId()); + assertEquals(1, actualMetric0.getDumpExecutionId()); + assertEquals("metadata", actualMetric0.getMetadata()); + assertEquals("progress1", actualMetric0.getProgress()); + + } + + private ReplicationMetrics createReplicationMetric(String policyName, Long scheduleId) { + ReplicationMetrics replicationMetrics = new ReplicationMetrics(); + replicationMetrics.setPolicy(policyName); + replicationMetrics.setScheduledExecutionId(scheduleId); + replicationMetrics.setDumpExecutionId(1); + replicationMetrics.setMetadata("metadata"); + replicationMetrics.setProgress("progress"); + return replicationMetrics; + } + + private ReplicationMetrics updateReplicationMetric(String policyName, Long scheduleId, String progress) { + ReplicationMetrics replicationMetrics = new ReplicationMetrics(); + replicationMetrics.setPolicy(policyName); + replicationMetrics.setScheduledExecutionId(scheduleId); + replicationMetrics.setProgress(progress); + return replicationMetrics; + } + +}