diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 4a76010904..5e9917af1b 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -424,9 +424,7 @@ private String getFinalDynamicPartitionDestination(Table table, Map entry : storer.getProperties().entrySet()) { - if (!entry.getKey().toString().equals(StatsSetupConst.COLUMN_STATS_ACCURATE)) { - params.put(entry.getKey().toString(), entry.getValue().toString()); - } + params.put(entry.getKey().toString(), entry.getValue().toString()); } return params; } @@ -761,11 +759,9 @@ private void registerPartitions(JobContext context) throws IOException{ if (!src.equals(tblPath)) { fs.delete(src, true); } - if (table.getParameters() != null - && table.getParameters().containsKey(StatsSetupConst.COLUMN_STATS_ACCURATE)) { - table.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE); - client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); - } + // Won't work for an ACID table. 
+ client.alterTableBasicStats(table.getCatName(), table.getDbName(), table.getTableName(), false, null, 0, null); + client.invalidateAllColumnStats(table.getCatName(), table.getDbName(), table.getTableName(), null, 0); return; } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java index e16674d99f..b433a81959 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java @@ -216,7 +216,7 @@ public void createTable() throws Exception { if (isTableImmutable()){ tableParams.put(hive_metastoreConstants.IS_IMMUTABLE,"true"); } - StatsSetupConst.setBasicStatsState(tableParams, StatsSetupConst.TRUE); + tbl.setIsStatsCompliant(true); tableParams.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false"); tbl.setParameters(tableParams); diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index be40395cc3..431f542f14 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -1304,10 +1304,31 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { } @Override - public Map> getPartitionColsWithStats(String catName, - String dbName, String tableName) throws MetaException, + public Map> getPartitionColsWithAccurateStats(String catName, + String dbName, String tableName, String validWriteId, boolean isAccurate) throws MetaException, NoSuchObjectException { return null; } + @Override + public Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, + 
String validWriteIds) throws MetaException { + return null; + } + + @Override + public Partition alterPartitionBasicStats(String catName, String dbname, + String name, List partVals, Map stats, + boolean isAccurate, long writeId, String validWriteIds) + throws MetaException { + return null; + } + + @Override + public Map invalidateAllColumnStatistics(String catName, + String dbName, String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException { + return null; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 939ef360c2..ecede96b7e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -57,6 +57,7 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; + import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -245,6 +246,7 @@ import org.apache.hadoop.hive.ql.plan.TruncateTableDesc; import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.plan.UpdateStatsDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationUtils; import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationTranslator; @@ -427,6 +429,11 @@ public int execute(DriverContext driverContext) { return addPartitions(db, addPartitionDesc); } + UpdateStatsDesc updateStatsDesc = work.getUpdateStatsDesc(); + if (updateStatsDesc != null) { + return updateStats(db, updateStatsDesc); + } + RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc(); if (renamePartitionDesc != null) { return renamePartition(db, renamePartitionDesc); @@ -667,6 +674,18 @@ public int 
execute(DriverContext driverContext) { return 0; } + private int updateStats(Hive db, UpdateStatsDesc desc) throws HiveException { + switch (desc.getOpType()) { + case INVALIDATE_ALL: { + db.invalidateStats(desc.getCatName(), desc.getDbName(), + desc.getTableName(), desc.getPartName(), true, true); + break; + } + default: throw new AssertionError(desc.getOpType()); + } + return 0; + } + private int createResourcePlan(Hive db, CreateResourcePlanDesc createResourcePlanDesc) throws HiveException { db.createResourcePlan(createResourcePlanDesc.getResourcePlan(), @@ -3648,7 +3667,7 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, int numParts = 0; for (Partition partition : parts) { Map props = partition.getParameters(); - Boolean state = StatsSetupConst.areBasicStatsUptoDate(props); + Boolean state = partition.getTPartition().isIsStatsCompliant(); for (String stat : StatsSetupConst.SUPPORTED_STATS) { stateMap.put(stat, stateMap.get(stat) && state); if (props != null && props.get(stat) != null) { @@ -3658,8 +3677,8 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, numParts++; } for (String stat : StatsSetupConst.SUPPORTED_STATS) { - StatsSetupConst.setBasicStatsState(tblProps, Boolean.toString(stateMap.get(stat))); tblProps.put(stat, valueMap.get(stat).toString()); + tblProps.put(stat + " ACCURATE", stateMap.get(stat).toString()); } tblProps.put(StatsSetupConst.NUM_PARTITIONS, Integer.toString(numParts)); tbl.setParameters(tblProps); @@ -3688,18 +3707,12 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, cs.getNumNulls(), cs.getCountDistint(), null, cs.getAvgColLen(), cs.getAvgColLen(), cs.getNumTrues(), cs.getNumFalses()); ColumnStatisticsObj cso = new ColumnStatisticsObj(partCol.getName(), partCol.getType(), data); colStats = Collections.singletonList(cso); - StatsSetupConst.setColumnStatsState(tblProps, colNames); } else { cols = Hive.getFieldsFromDeserializer(colPath, 
deserializer); List parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1); AggrStats aggrStats = db.getAggrColStatsFor( dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); colStats = aggrStats.getColStats(); - if (parts.size() == aggrStats.getPartsFound()) { - StatsSetupConst.setColumnStatsState(tblProps, colNames); - } else { - StatsSetupConst.removeColumnStatsState(tblProps, colNames); - } } tbl.setParameters(tblProps); } else { @@ -5078,8 +5091,7 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws Exceptio if (crtTbl.getLocation() == null && !tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters()); } // create the table diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 7818efbbf5..f054c8a60e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -33,7 +33,9 @@ import java.util.regex.Pattern; import org.apache.avro.generic.GenericData; + import com.google.common.base.Preconditions; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -1680,6 +1682,11 @@ public static TableSnapshot getTableSnapshot(Configuration conf, if (tblName == null) { tblName = tbl.getTableName(); } + return getTableSnapshotForTxnTable(conf, dbName, tblName, isStatsUpdater); + } + + public static TableSnapshot getTableSnapshotForTxnTable(Configuration conf, + String dbName, String tblName, boolean isStatsUpdater) throws LockException, AssertionError { long writeId = -1; ValidWriteIdList validWriteIdList = null; diff --git 
ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java index 1b11e0e762..e1bcb48a8a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java @@ -1004,7 +1004,7 @@ private CacheChunk prepareRangesForCompressedRead(long cOffset, long endCOffset, if (current instanceof CacheChunk) { // 2a. This is a decoded compression buffer, add as is. CacheChunk cc = (CacheChunk)current; - if (isTracingEnabled) { // TODO# HERE unaccompanied lock + if (isTracingEnabled) { LOG.trace("Locking " + cc.getBuffer() + " due to reuse"); } cacheWrapper.reuseBuffer(cc.getBuffer()); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 239a606fdb..82915c657e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -126,6 +126,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType; +import org.apache.hadoop.hive.ql.plan.UpdateStatsDesc; import org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.Deserializer; @@ -1908,9 +1909,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par } // column stats will be inaccurate - if (!hasFollowingStatsTask) { - StatsSetupConst.clearColumnStatsState(newTPart.getParameters()); - } + boolean areColStatsInvalid = !hasFollowingStatsTask; // recreate the partition if it existed before if (isSkewedStoreAsSubdir) { @@ -1923,14 +1922,11 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par 
skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); newCreatedTpart.getSd().setSkewedInfo(skewedInfo); } - if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setBasicStatsState(newTPart.getParameters(), StatsSetupConst.FALSE); - } + boolean areBasicStatsInvalid = !this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER); if (oldPart == null) { newTPart.getTPartition().setParameters(new HashMap()); if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters()); } // Note: we are creating a brand new the partition, so this is going to be valid for ACID. List filesForStats = null; @@ -1963,6 +1959,9 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par // In that case, we want to retry with alterPartition. 
LOG.debug("Caught AlreadyExistsException, trying to alter partition instead"); setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot); + if (areBasicStatsInvalid || areColStatsInvalid) { + invalidatePartStats(newTPart, areColStatsInvalid, areBasicStatsInvalid); + } } catch (Exception e) { try { final FileSystem newPathFileSystem = newPartPath.getFileSystem(this.getConf()); @@ -1982,6 +1981,9 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par } } else { setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot); + if (areBasicStatsInvalid || areColStatsInvalid) { + invalidatePartStats(newTPart, areColStatsInvalid, areBasicStatsInvalid); + } } perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); @@ -2001,7 +2003,6 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par } } - private static Path genPartPathFromTable(Table tbl, Map partSpec, Path tblDataLocationPath) throws MetaException { Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec)); @@ -2500,14 +2501,8 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType } perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES); } - if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - } - - //column stats will be inaccurate - if (!hasFollowingStatsTask) { - StatsSetupConst.clearColumnStatsState(tbl.getParameters()); - } + boolean areBasicStatsInvalid = !this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER); + boolean areColStatsInvalid = !hasFollowingStatsTask; try { if (isSkewedStoreAsSubdir) { @@ -2530,6 +2525,9 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType } alterTable(tbl, false, environmentContext, true); + if (areBasicStatsInvalid || areColStatsInvalid) { + invalidateTblStats(tbl, areBasicStatsInvalid, 
areColStatsInvalid); + } if (AcidUtils.isTransactionalTable(tbl)) { addWriteNotificationLog(tbl, null, newFiles, writeId); @@ -2572,12 +2570,16 @@ public Partition createPartition(Table tbl, Map partSpec) throws new ArrayList(size); AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); + Boolean isAccurate = addPartitionDesc.areBasicStatsAccurate(); for (int i = 0; i < size; ++i) { org.apache.hadoop.hive.metastore.api.Partition tmpPart = convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); if (tmpPart != null && tableSnapshot != null && tableSnapshot.getWriteId() > 0) { tmpPart.setWriteId(tableSnapshot.getWriteId()); } + if (isAccurate != null) { + tmpPart.setIsStatsCompliant(isAccurate); + } in.add(tmpPart); } List out = new ArrayList(); @@ -5452,4 +5454,96 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) throw new HiveException(e); } } + + public void invalidateTblStats( + Table tbl, boolean areBasicStatsInvalid, boolean areColStatsInvalid) throws HiveException { + invalidateStats(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(), + null, areBasicStatsInvalid, areColStatsInvalid); + } + + private void invalidatePartStats( + Partition part, boolean areColStatsInvalid, boolean areBasicStatsInvalid) throws HiveException { + Table tbl = part.getTable(); + invalidateStats(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(), + part.getName(), areBasicStatsInvalid, areColStatsInvalid); + } + + + public void invalidateStats(String catName, String dbName, String tableName, String partName, + boolean areBasicStatsInvalid, boolean areColStatsInvalid) throws HiveException { + if (catName == null) { + catName = getDefaultCatalog(conf); + } + try { + AcidUtils.TableSnapshot tableSnapshot = ensureSnapshot(catName, dbName, tableName); + + long writeId = tableSnapshot == null ? 
0 : tableSnapshot.getWriteId(); + if (areBasicStatsInvalid) { + if (partName == null) { + getMSC().alterTableBasicStats(catName, dbName, tableName, false, null, + writeId, tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList()); + } else { + getMSC().alterPartitionBasicStats(catName, dbName, tableName, partName, false, null, + writeId, tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList()); + } + } + if (areColStatsInvalid) { + getMSC().invalidateAllColumnStats(catName, dbName, tableName, null, writeId); + } + } catch (MetaException e) { + throw new HiveException("Unable to update stats; " + e.getMessage(), e); + } catch (TException e) { + throw new HiveException("Unable to update stats; " + e.getMessage(), e); + } + } + + + public void updateBasicTableStats(String catName, String dbName, + String tableName, boolean isAccurate, Map newStats) throws HiveException { + if (catName == null) { + catName = getDefaultCatalog(conf); + } + try { + AcidUtils.TableSnapshot tableSnapshot = ensureSnapshot(catName, dbName, tableName); + getMSC().alterTableBasicStats(catName, dbName, tableName, isAccurate, newStats, + tableSnapshot == null ? 0 : tableSnapshot.getWriteId(), + tableSnapshot == null ? 
null : tableSnapshot.getValidWriteIdList()); + } catch (TException e) { + throw new HiveException("Unable to update stats; " + e.getMessage(), e); + } + } + + + private AcidUtils.TableSnapshot ensureSnapshot(String catName, String dbName, String tableName) + throws MetaException, TException, LockException, AssertionError { + org.apache.hadoop.hive.metastore.api.Table tbl = getMSC().getTable(catName, dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl)) return null; + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshotForTxnTable( + conf, dbName, tableName, true); + if (tableSnapshot == null) { + LOG.warn("Cannot get a table snapshot for " + tableName); + } + return tableSnapshot; + } + + + public void updateBasicPartitionsStats(String catName, String dbName, + String tableName, boolean isAccurate, List partNames, + List> partStats) throws HiveException { + if (catName == null) { + catName = getDefaultCatalog(conf); + } + try { + AcidUtils.TableSnapshot tableSnapshot = ensureSnapshot(catName, dbName, tableName); + long writeId = tableSnapshot == null ? 0 : tableSnapshot.getWriteId(); + String validWriteIds = tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList(); + for (int i = 0; i < partNames.size(); ++i) { + // TODO: change the API to a bulk call? not using ColumnStatsDesc. 
+ getMSC().alterPartitionBasicStats(catName, dbName, tableName, partNames.get(i), + isAccurate, partStats.get(i), writeId, validWriteIds); + } + } catch (TException e) { + throw new HiveException("Unable to update stats; " + e.getMessage(), e); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 3240f2d315..6414461032 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -504,8 +504,7 @@ private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, // Add temp table info to current session Table tTable = new Table(tbl); if (!isVirtualTable) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getParameters(), - org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable(tbl), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(tbl.getParameters()); } if (tables == null) { tables = new HashMap(); @@ -627,11 +626,6 @@ private boolean needToUpdateStats(Map props, EnvironmentContext e props.put(stat, "0"); } } - //first set basic stats to true - StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE); - environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK); - //then invalidate column stats - StatsSetupConst.clearColumnStatsState(props); return statsPresent; } @@ -664,6 +658,8 @@ private void truncateTempTable(org.apache.hadoop.hive.metastore.api.Table table) EnvironmentContext environmentContext = new EnvironmentContext(); if (needToUpdateStats(table.getParameters(), environmentContext)) { + // Use alter table here, since this is a temp table. 
+ table.setIsStatsCompliant(true); alter_table_with_environmentContext(table.getDbName(), table.getTableName(), table, environmentContext); } } catch (Exception e) { @@ -797,9 +793,9 @@ private boolean updateTempTableColumnStats(String dbName, String tableName, List colNames = new ArrayList<>(); for (ColumnStatisticsObj obj : colStats.getStatsObj()) { colNames.add(obj.getColName()); + obj.setIsStatsCompliant(true); } org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName); - StatsSetupConst.setColumnStatsState(table.getParameters(), colNames); return true; } @@ -810,6 +806,7 @@ private static void mergeColumnStats(Map oldStats, for (ColumnStatisticsObj colStat : newColList) { // This is admittedly a bit simple, StatsObjectConverter seems to allow // old stats attributes to be kept if the new values do not overwrite them. + // TODO: ...and it is probably incorrect oldStats.put(colStat.getColName().toLowerCase(), colStat); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 36cd46aa43..df3cb7f728 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -640,6 +640,7 @@ static void formatOutput(String name, String value, StringBuilder tableInfo, List ret = new ArrayList<>(); ret.add(col.getName()); ret.add(col.getType()); + ret.add((columnStatisticsObj.isIsStatsCompliant() ? 
"" : "not ") + "accurate"); if (isColStatsAvailable) { if (columnStatisticsObj != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index 705365b74c..cecb859126 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -169,12 +169,10 @@ public void describeTable(DataOutputStream outStream, String colPath, output += mdt.renderTable(isOutputPadded); } } else { - String statsState; - if (tbl.getParameters() != null && (statsState = tbl.getParameters().get(StatsSetupConst.COLUMN_STATS_ACCURATE)) != null) { + if (tbl.getTTable().isSetIsStatsCompliant()) { StringBuilder str = new StringBuilder(); - MetaDataFormatUtils.formatOutput(StatsSetupConst.COLUMN_STATS_ACCURATE, - isFormatted ? StringEscapeUtils.escapeJava(statsState) : HiveStringUtils.escapeJava(statsState), - str, isOutputPadded); + MetaDataFormatUtils.formatOutput("Basic stats accurate", + Boolean.toString(tbl.getTTable().isIsStatsCompliant()), str, isOutputPadded); output = output.concat(str.toString()); } } @@ -610,7 +608,7 @@ public void showResourcePlans(DataOutputStream out, List resourc /** * Class to print text records for resource plans in the following format: - * + * * [status=,parallelism=,defaultPool=] * [allocFraction=,schedulingPolicy=,parallelism=] * > : if(){} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index b8d4375112..43735b93ae 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -465,11 +465,6 @@ else if (udaf instanceof GenericUDAFCount) { Logger.debug("Table doesn't have up to date stats " + tbl.getTableName()); return 
null; } - if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, tbl.getParameters(), colName)) { - Logger.debug("Stats for table : " + tbl.getTableName() + " column " + colName - + " are not up to date."); - return null; - } List stats = hive.getMSC().getTableColumnStatistics( @@ -480,7 +475,13 @@ else if (udaf instanceof GenericUDAFCount) { Logger.debug("No stats for " + tbl.getTableName() + " column " + colName); return null; } - Long nullCnt = getNullcountFor(type, stats.get(0).getStatsData()); + ColumnStatisticsObj stat = stats.get(0); + if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, stat)) { + Logger.debug("Stats for table : " + tbl.getTableName() + " column " + colName + + " are not up to date."); + return null; + } + Long nullCnt = getNullcountFor(type, stat.getStatsData()); if (null == nullCnt) { Logger.debug("Unsupported type: " + desc.getTypeString() + " encountered in " + "metadata optimizer for column : " + colName); @@ -529,13 +530,7 @@ else if (udaf instanceof GenericUDAFCount) { ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc)exprMap.get(((ExprNodeColumnDesc)aggr.getParameters().get(0)).getColumn()); String colName = colDesc.getColumn(); StatType type = getType(colDesc.getTypeString()); - if(!tbl.isPartitioned()) { - if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, tbl.getParameters(), colName)) { - Logger.debug("Stats for table : " + tbl.getTableName() + " column " + colName - + " are not up to date."); - return null; - } - + if (!tbl.isPartitioned()) { List stats = hive.getMSC().getTableColumnStatistics( tbl.getDbName(), tbl.getTableName(), @@ -546,6 +541,12 @@ else if (udaf instanceof GenericUDAFCount) { return null; } ColumnStatisticsData statData = stats.get(0).getStatsData(); + if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, stats.get(0))) { + Logger.debug("Stats for table : " + tbl.getTableName() + " column " + colName + + " are not up to date."); + return null; + } + String name = 
colDesc.getTypeString().toUpperCase(); switch (type) { case Integer: { @@ -675,17 +676,23 @@ else if (udaf instanceof GenericUDAFCount) { String colName = colDesc.getColumn(); StatType type = getType(colDesc.getTypeString()); if (!tbl.isPartitioned()) { - if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, tbl.getParameters(), colName)) { + List stats = + hive.getMSC().getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), + tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null); + if (stats.isEmpty()) { + return null; + } + + if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, stats.get(0))) { Logger.debug("Stats for table : " + tbl.getTableName() + " column " + colName + " are not up to date."); return null; } - ColumnStatisticsData statData = - hive.getMSC().getTableColumnStatistics( - tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName), - tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null) - .get(0).getStatsData(); + + ColumnStatisticsData statData = stats.get(0).getStatsData(); String name = colDesc.getTypeString().toUpperCase(); + switch (type) { case Integer: { LongSubType subType = LongSubType.valueOf(name); @@ -908,11 +915,6 @@ private ColumnStatisticsData validateSingleColStat(List sta Hive hive, Table tbl, String colName, Set parts) throws TException, LockException { List partNames = new ArrayList(parts.size()); for (Partition part : parts) { - if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) { - Logger.debug("Stats for part : " + part.getSpec() + " column " + colName - + " are not up to date."); - return null; - } partNames.add(part.getName()); } AcidUtils.TableSnapshot tableSnapshot = @@ -925,6 +927,14 @@ private ColumnStatisticsData validateSingleColStat(List sta Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions"); return null; } + for (List stats : 
result.values()) { + for (ColumnStatisticsObj stat : stats) { + if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, stat)) { + Logger.debug("Stats for column " + colName + " are not up to date."); + return null; + } + } + } return result.values(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 37e6d4c850..597b13e729 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -557,6 +557,7 @@ private void updateColStats(Set projIndxLst, boolean allowMissingStats) // no need to make a metastore call rowCount = 0; hiveColStats = new ArrayList(); + // Note: we don't set isAccurate here; no metastore objects to check. for (int i = 0; i < nonPartColNamesThatRqrStats.size(); i++) { // add empty stats object for each column hiveColStats.add( diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java index f7712e6c33..87761848ff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java @@ -297,7 +297,7 @@ private ColStatistics extractColStats(RexInputRef ref) { ColStatistics colStats = table.getColStat(Lists.newArrayList(columnOrigin.getOriginColumnOrdinal()), false).get(0); if (colStats != null && StatsUtils.areColumnStatsUptoDateForQueryAnswering( - table.getHiveTableMD(), table.getHiveTableMD().getParameters(), colStats.getColumnName())) { + table.getHiveTableMD(), colStats.isAccurate())) { return colStats; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 01179c805f..5490381217 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -1423,6 +1423,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, cs.setCountDistint(stats.getNumRows()); cs.setNumNulls(0); cs.setAvgColLen(StatsUtils.getAvgColLenOf(conf, ci.getObjectInspector(), colType)); + cs.setIsAccurate(false); aggColStats.add(cs); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 2b9f763692..eafb5e3a37 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -3557,8 +3557,7 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole if (desc.getPartParams() == null) { desc.setPartParams(new HashMap()); } - StatsSetupConst.setStatsStateForCreateTable(desc.getPartParams(), - MetaStoreUtils.getColumnNames(tab.getCols()), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(desc.getPartParams()); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index eb594f825d..6dfd290a39 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -253,7 +253,7 @@ public static boolean prepareImport(boolean isImportCmd, boolean inReplicationScope = false; if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){ tblDesc.setReplicationSpec(replicationSpec); - StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE); + 
tblDesc.setAreStatAccurate(false); inReplicationScope = true; } @@ -278,8 +278,8 @@ public static boolean prepareImport(boolean isImportCmd, for (Partition partition : partitions) { // TODO: this should ideally not create AddPartitionDesc per partition AddPartitionDesc partsDesc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition); - if (inReplicationScope){ - StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE); + if (inReplicationScope) { + partsDesc.setBasicStatsAccurate(false); } partitionDescs.add(partsDesc); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 474c793ec3..39ec50f16c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -205,6 +205,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.UpdateStatsDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc; import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; @@ -6928,17 +6929,10 @@ private void genPartnCols(String dest, Operator input, QB qb, } } - @SuppressWarnings("unchecked") private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException { - String qTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, - tableName }); - AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, null, false); - HashMap mapProp = new HashMap<>(); - mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); - alterTblDesc.setOldName(qTableName); - alterTblDesc.setProps(mapProp); - alterTblDesc.setDropIfExists(true); - this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), 
getOutputs(), alterTblDesc))); + UpdateStatsDesc desc = new UpdateStatsDesc( + null, dbName, tableName, null, UpdateStatsDesc.OpType.INVALIDATE_ALL); + this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java index d3a87f9772..b6f7fcda91 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java @@ -155,6 +155,7 @@ public void setOutputFormat(String outputFormat) { List partitions = null; boolean replaceMode = false; private ReplicationSpec replicationSpec = null; + private Boolean areBasicStatsAccurate = null; /** @@ -324,4 +325,13 @@ public ReplicationSpec getReplicationSpec(){ } return this.replicationSpec; } + + public void setBasicStatsAccurate(boolean b) { + this.areBasicStatsAccurate = b; + + } + + public Boolean areBasicStatsAccurate() { + return areBasicStatsAccurate; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java index a31f965a5f..f6233e8fbd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ColStatistics.java @@ -30,6 +30,7 @@ private Range range; private boolean isPrimaryKey; private boolean isEstimated; + private boolean isAccurate; public ColStatistics(String colName, String colType) { this.setColumnName(colName); @@ -152,9 +153,18 @@ public ColStatistics clone() { if (range != null ) { clone.setRange(range.clone()); } + clone.setIsAccurate(isAccurate); return clone; } + public void setIsAccurate(boolean isAccurate) { + this.isAccurate = isAccurate; + } + + public boolean isAccurate() { + return isAccurate; + } + public boolean isPrimaryKey() { return isPrimaryKey; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 0fadf1b61f..bf4b3b86e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -109,6 +109,7 @@ // The FSOP configuration for the FSOP that is going to write initial data during ctas. // This is not needed beyond compilation, so it is transient. private transient FileSinkDesc writer; + private Boolean areStatsAccurate = null; public CreateTableDesc() { } @@ -873,12 +874,13 @@ public Table toTable(HiveConf conf) throws HiveException { if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) { if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), - MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters()); + tbl.getTTable().setIsStatsCompliant(true); } + } else if (areStatsAccurate != null) { + tbl.getTTable().setIsStatsCompliant(areStatsAccurate); } else { - StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null, - StatsSetupConst.FALSE); + tbl.getTTable().setIsStatsCompliant(false); } return tbl; } @@ -891,7 +893,7 @@ public Long getInitialMmWriteId() { return initialMmWriteId; } - + public FileSinkDesc getAndUnsetWriter() { FileSinkDesc fsd = writer; @@ -902,4 +904,8 @@ public FileSinkDesc getAndUnsetWriter() { public void setWriter(FileSinkDesc writer) { this.writer = writer; } + + public void setAreStatsAccurate(boolean b) { + this.areStatsAccurate = b; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java index f0f7b18d19..e70245d118 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java @@ -68,6 +68,7 @@ private Map serdeProps; // only used for materialized views private Set tablesUsed; // only used for materialized views private ReplicationSpec replicationSpec = null; + private Boolean areStatsAccurate = null; /** * For serialization only. @@ -365,6 +366,8 @@ public Table toTable(HiveConf conf) throws HiveException { tbl.setOutputFormatClass(getOutputFormat()); } + tbl.getTTable().setIsStatsCompliant(areStatsAccurate); + if (isMaterialized()) { if (getLocation() != null) { tbl.setDataLocation(new Path(getLocation())); @@ -412,4 +415,8 @@ public Table toTable(HiveConf conf) throws HiveException { return tbl; } + + public void setAreStatsAccurate(boolean b) { + this.areStatsAccurate = b; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java index 8ed3b03a84..c3bba5078a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java @@ -113,6 +113,8 @@ private AlterTablePartMergeFilesDesc mergeFilesDesc; private CacheMetadataDesc cacheMetadataDesc; + private UpdateStatsDesc updateStatsDesc; + public DDLWork() { } @@ -415,6 +417,11 @@ public DDLWork(HashSet inputs, HashSet outputs, this.addPartitionDesc = addPartitionDesc; } + public DDLWork(HashSet inputs, HashSet outputs, UpdateStatsDesc desc) { + this(inputs, outputs); + this.updateStatsDesc = desc; + } + /** * @param renamePartitionDesc * information about the partitions we want to add. 
@@ -1356,4 +1363,8 @@ public CreateOrDropTriggerToPoolMappingDesc getTriggerToPoolMappingDesc() { public void setTriggerToPoolMappingDesc(CreateOrDropTriggerToPoolMappingDesc triggerToPoolMappingDesc) { this.triggerToPoolMappingDesc = triggerToPoolMappingDesc; } + + public UpdateStatsDesc getUpdateStatsDesc() { + return updateStatsDesc; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index ef7325fe2c..842d768bf4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -359,4 +359,15 @@ public Table toTable(HiveConf conf) throws Exception { return null; } } + + public void setAreStatAccurate(boolean b) { + switch (getDescType()) { + case TABLE: + createTblDesc.setAreStatsAccurate(b); + case VIEW: + createViewDesc.setAreStatsAccurate(b); + default: + throw new AssertionError(getDescType()); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UpdateStatsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/UpdateStatsDesc.java new file mode 100644 index 0000000000..824460d802 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/plan/UpdateStatsDesc.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; + +public class UpdateStatsDesc extends DDLDesc implements DDLDescWithWriteId { + private static final long serialVersionUID = 1L; + + public enum OpType { + INVALIDATE_ALL, // TODO: add as needed - set stats, and such + } + + private final String catName, dbName, tableName, partName; + private final OpType opType; + private long writeId; + + public UpdateStatsDesc(String catName, String dbName, String tableName, + String partName, OpType opType) { + super(); + this.catName = catName; + this.dbName = dbName; + this.tableName = tableName; + this.partName = partName; + this.opType = opType; + } + + @Override + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + @Override + public String getFullTableName() { + return catName + "." + dbName + "." + tableName; + } + + @Override + public boolean mayNeedWriteId() { + return true; + } + + public String getCatName() { + return catName; + } + + public String getDbName() { + return dbName; + } + + public String getTableName() { + return tableName; + } + + public String getPartName() { + return partName; + } + + public OpType getOpType() { + return opType; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index 53b3065a88..222a9c1a63 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.ql.stats; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -128,7 +130,8 @@ public String getName() { static class 
FooterStatCollector implements Runnable { private Partish partish; - private Object result; + private Map newStats; + private boolean areStatsValid, isPartition; private JobConf jc; private Path dir; private FileSystem fs; @@ -146,15 +149,10 @@ public String apply(FooterStatCollector sc) { return String.format("%s#%s", sc.partish.getTable().getCompleteName(), sc.partish.getPartishType()); } }; - private static final Function EXTRACT_RESULT_FUNCTION = new Function() { - @Override - public Partition apply(FooterStatCollector input) { - return (Partition) input.result; - } - }; + private boolean isValid() { - return result != null; + return newStats != null; } public void init(HiveConf conf, LogHelper console) throws IOException { @@ -166,7 +164,7 @@ public void init(HiveConf conf, LogHelper console) throws IOException { @Override public void run() { - Map parameters = partish.getPartParameters(); + Map stats = new HashMap<>(); try { long numRows = 0; long rawDataSize = 0; @@ -213,21 +211,16 @@ public void run() { } } - StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE); - - parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows)); - parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize)); - parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize)); - parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles)); - parameters.put(StatsSetupConst.NUM_ERASURE_CODED_FILES, String.valueOf(numErasureCodedFiles)); + stats.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows)); + stats.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize)); + stats.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize)); + stats.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles)); + stats.put(StatsSetupConst.NUM_ERASURE_CODED_FILES, String.valueOf(numErasureCodedFiles)); + newStats = stats; + areStatsValid = true; + isPartition = partish.getPartition() != null; - if (partish.getPartition() != null) { - result = 
new Partition(partish.getTable(), partish.getPartition().getTPartition()); - } else { - result = new Table(partish.getTable().getTTable()); - } - - String msg = partish.getSimpleName() + " stats: [" + toString(parameters) + ']'; + String msg = partish.getSimpleName() + " stats: [" + toString(stats) + ']'; LOG.debug(msg); console.printInfo(msg); @@ -321,7 +314,7 @@ private int updatePartitions(Hive db, List scs, Table table } if (work.isStatsReliable()) { for (FooterStatCollector statsCollection : scs) { - if (statsCollection.result == null) { + if (statsCollection.newStats == null) { LOG.debug("Stats requested to be reliable. Empty stats found: {}", statsCollection.partish.getSimpleName()); return -1; } @@ -357,17 +350,24 @@ private int updatePartitions(Hive db, List scs, Table table throw new RuntimeException("very intresting"); } - if (values.get(0).result instanceof Table) { - db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext, true); + FooterStatCollector f0 = values.get(0); + Table t = f0.partish.getTable(); + if (!f0.isPartition) { + db.updateBasicTableStats( + t.getCatName(), t.getDbName(), t.getTableName(), f0.areStatsValid, f0.newStats); LOG.debug("Updated stats for {}.", tableFullName); } else { - if (values.get(0).result instanceof Partition) { - List results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION); - db.alterPartitions(tableFullName, results, environmentContext, true); - LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName); - } else { - throw new RuntimeException("inconsistent"); + List partNames = new ArrayList<>(values.size()); + List> partStats = new ArrayList<>(values.size()); + boolean isAccurate = true; // Currently we only send true flag from here. Improve? 
+ for (FooterStatCollector fc : values) { + partNames.add(fc.partish.getPartition().getName()); + partStats.add(fc.newStats); + isAccurate = isAccurate && fc.areStatsValid; } + db.updateBasicPartitionsStats( + t.getCatName(), t.getDbName(), t.getTableName(), isAccurate, partNames, partStats); + LOG.debug("Bulk updated {} partitions of {}.", partStats.size(), tableFullName); } } LOG.debug("Updated stats for: {}", tableFullName); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index b9b4a442b7..039f13c709 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -111,12 +111,13 @@ public String getName() { } private static class BasicStatsProcessor { - private Partish partish; private List partfileStatus; + private Map newStats; private boolean isMissingAcidState = false; private BasicStatsWork work; private boolean followedColStats1; + private boolean doSetBasicToAccurate, doInvalidateColStats; public BasicStatsProcessor(Partish partish, BasicStatsWork work, HiveConf conf, boolean followedColStats2) { this.partish = partish; @@ -124,20 +125,16 @@ public BasicStatsProcessor(Partish partish, BasicStatsWork work, HiveConf conf, followedColStats1 = followedColStats2; } - public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException { - Partish p = partish; - Map parameters = p.getPartParameters(); - if (work.isTargetRewritten()) { - StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE); - } + public boolean process(StatsAggregator statsAggregator) throws HiveException, MetaException { + newStats = StatsSetupConst.extractStats(partish.getPartParameters()); + doSetBasicToAccurate = work.isTargetRewritten(); + // work.getTableSpecs() == null means it is not analyze command // and then if it is not followed by column stats, we should clean // column stats 
// FIXME: move this to ColStat related part - if (!work.isExplicitAnalyze() && !followedColStats1) { - StatsSetupConst.clearColumnStatsState(parameters); - } + doInvalidateColStats = !work.isExplicitAnalyze() && !followedColStats1; if (partfileStatus == null) { // This may happen if ACID state is absent from config. @@ -145,32 +142,31 @@ public Object process(StatsAggregator statsAggregator) throws HiveException, Met : partish.getPartition().getSpec().toString(); LOG.warn("Partition/partfiles is null for: " + spec); if (isMissingAcidState) { - MetaStoreUtils.clearQuickStats(parameters); - return p.getOutput(); + MetaStoreUtils.clearQuickStats(newStats); + return true; } - return null; + return false; } // The collectable stats for the aggregator needs to be cleared. // For eg. if a file is being loaded, the old number of rows are not valid // XXX: makes no sense for me... possibly not needed anymore - if (work.isClearAggregatorStats()) { - // we choose to keep the invalid stats and only change the setting. - StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE); - } + // we choose to keep the invalid stats and only change the setting. + doSetBasicToAccurate = doSetBasicToAccurate && work.isClearAggregatorStats(); - MetaStoreUtils.populateQuickStats(partfileStatus, parameters); + // TODO: remove this crap + MetaStoreUtils.populateQuickStats(partfileStatus, newStats); if (statsAggregator != null) { // Update stats for transactional tables (MM, or full ACID with overwrite), even // though we are marking stats as not being accurate. 
- if (StatsSetupConst.areBasicStatsUptoDate(parameters) || p.isTransactionalTable()) { - String prefix = getAggregationPrefix(p.getTable(), p.getPartition()); - updateStats(statsAggregator, parameters, prefix); + if (doSetBasicToAccurate || partish.isTransactionalTable()) { + String prefix = getAggregationPrefix(partish.getTable(), partish.getPartition()); + updateStats(statsAggregator, newStats, prefix); } } - return p.getOutput(); + return true; } public void collectFileStatus(Warehouse wh, HiveConf conf) throws MetaException, IOException { @@ -259,11 +255,14 @@ private int aggregateStats(Hive db) { BasicStatsProcessor basicStatsProcessor = new BasicStatsProcessor(p, work, conf, followedColStats); basicStatsProcessor.collectFileStatus(wh, conf); - Table res = (Table) basicStatsProcessor.process(statsAggregator); - if (res == null) { + if (!basicStatsProcessor.process(statsAggregator)) { return 0; } - db.alterTable(tableFullName, res, environmentContext, true); + if (basicStatsProcessor.doInvalidateColStats) { + db.invalidateStats(table.getCatName(), table.getDbName(), table.getTableName(), null, false, true); + } + db.updateBasicTableStats(table.getCatName(), table.getDbName(), table.getTableName(), + basicStatsProcessor.doSetBasicToAccurate, basicStatsProcessor.newStats); if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java index 31c96826b0..705886d204 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java @@ -59,6 +59,7 @@ public static ColumnStatisticsObj readHiveStruct(String columnName, String colum ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); 
statsObj.setColName(columnName); statsObj.setColType(columnType); + statsObj.setIsStatsCompliant(true); try { unpackStructObject(foi, f, fieldName, statsObj); return statsObj; diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index a50ec18b8a..14f2f39f4e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -19,10 +19,13 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; @@ -31,7 +34,6 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; @@ -235,19 +237,34 @@ private void stopWorkers() { } Collections.sort(allCols); if (table.getPartitionKeysSize() == 0) { - Map params = table.getParameters(); - List colsToUpdate = null; long writeId = isTxn ? table.getWriteId() : -1; + boolean areBasicStatsValid = table.isSetIsStatsCompliant() && table.isIsStatsCompliant(); + if (isTxn) { + areBasicStatsValid = ObjectStore.isCurrentStatsValidForTheQuery( + conf, areBasicStatsValid, writeId, writeIdString, false); + } + + ColumnStatistics existingStats = null; + try { + // Note: this should NOT do txn verification - we want to get outdated stats, to + // see if we need to update anything. 
+ existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); + } catch (NoSuchObjectException e) { + LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); + return null; + } + Collection colsToUpdate = null; if (isExistingOnly) { - // Get the existing stats, including the txn state if any, to see if we need to update. - colsToUpdate = getExistingNonPartTableStatsToUpdate( - fullTableName, cat, db, tbl, params, writeId, allCols, writeIdString); + colsToUpdate = getExistingStatsToUpdate( + existingStats, writeIdString, writeId, areBasicStatsValid); } else { - colsToUpdate = getAnyStatsToUpdate(db, tbl, allCols, params, writeId, writeIdString); + colsToUpdate = getAnyStatsToUpdate( + existingStats, db, tbl, allCols, areBasicStatsValid, writeId, writeIdString); } - LOG.debug("Columns to update are {}; existing only: {}, out of: {} based on {}", - colsToUpdate, isExistingOnly, allCols, params); + + LOG.debug("Columns to update are {}; existing only: {}, out of: {}", + colsToUpdate, isExistingOnly, allCols); if (colsToUpdate == null || colsToUpdate.isEmpty()) { return null; // No update necessary. @@ -284,15 +301,15 @@ private void stopWorkers() { // TODO: ideally when col-stats-accurate stuff is stored in some sane structure, this should // retrieve partsToUpdate in a single query; no checking partition params in java. List partNames = null; - Map> colsPerPartition = null; + Map> colsPerPartitionAcc = null, colsPerPartitionInacc = null; boolean isAllParts = true; if (isExistingOnly) { // Make sure the number of partitions we get, and the number of stats objects, is consistent. 
rs.openTransaction(); boolean isOk = false; try { - colsPerPartition = rs.getPartitionColsWithStats(cat, db, tbl); - partNames = Lists.newArrayList(colsPerPartition.keySet()); + colsPerPartitionInacc = rs.getPartitionColsWithAccurateStats(cat, db, tbl, writeIdString, false); + partNames = Lists.newArrayList(colsPerPartitionInacc.keySet()); int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, ""); isAllParts = partitionCount == partNames.size(); isOk = true; @@ -304,10 +321,12 @@ private void stopWorkers() { } } } else { + colsPerPartitionAcc = rs.getPartitionColsWithAccurateStats(cat, db, tbl, writeIdString, true); partNames = rs.listPartitionNames(cat, db, tbl, (short) -1); isAllParts = true; } Table t = rs.getTable(cat, db, tbl); + boolean isTxn = AcidUtils.isTransactionalTable(t); List currentBatch = null; int nextBatchStart = 0, nextIxInBatch = -1, currentBatchStart = 0; List colsToUpdateForAll = null; @@ -329,6 +348,7 @@ private void stopWorkers() { } nextIxInBatch = 0; } + int currentIxInBatch = nextIxInBatch++; Partition part = currentBatch.get(currentIxInBatch); String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues()); @@ -347,17 +367,37 @@ private void stopWorkers() { isAllParts = false; continue; } + boolean areBasicStatsAccurate = part.isSetIsStatsCompliant() && part.isIsStatsCompliant(); + if (isTxn) { + areBasicStatsAccurate = ObjectStore.isCurrentStatsValidForTheQuery( + conf, areBasicStatsAccurate, part.getWriteId(), writeIdString, false); + } // Find which columns we need to update for this partition, if any. 
- List colsToMaybeUpdate = allCols; - if (isExistingOnly) { - colsToMaybeUpdate = colsPerPartition.get(partName); - Collections.sort(colsToMaybeUpdate); + List colsToUpdate = allCols; + if (areBasicStatsAccurate) { + if (isExistingOnly) { + colsToUpdate = colsPerPartitionInacc.get(partName); + } else { + List colsAccurate = colsPerPartitionAcc.get(partName); + if (colsAccurate != null) { + if (colsAccurate.size() >= allCols.size()) { + continue; // No need to update anything for this partition. + } else if (!colsAccurate.isEmpty()) { + // A subset of columns is accurate, which should be rare + // (or we just got turned on after some partial analyze). + colsToUpdate = new ArrayList<>(colsToUpdate.size() - colsAccurate.size()); + Set colsAccurateSet = new HashSet<>(colsAccurate); + for (String col : allCols) { + if (colsAccurateSet.contains(col)) continue; + colsToUpdate.add(col); + } + } + } + } } - List colsToUpdate = getAnyStatsToUpdate(db, tbl, colsToMaybeUpdate, params, - writeIdString == null ? -1 : part.getWriteId(), writeIdString); - LOG.debug("Updating {} based on {} and {}", colsToUpdate, colsToMaybeUpdate, params); + LOG.debug("Updating {}", colsToUpdate); if (colsToUpdate == null || colsToUpdate.isEmpty()) { if (isAllParts) { @@ -433,55 +473,51 @@ private String buildPartColStr(Table table) { return partColStr; } - private List getExistingNonPartTableStatsToUpdate(TableName fullTableName, - String cat, String db, String tbl, Map params, long statsWriteId, - List allCols, String writeIdString) throws MetaException { - ColumnStatistics existingStats = null; - try { - // Note: this should NOT do txn verification - we want to get outdated stats, to - // see if we need to update anything. 
- existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); - } catch (NoSuchObjectException e) { - LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); - return null; - } - // TODO: we should probably skip updating if writeId is from an active txn - boolean isTxnValid = (writeIdString == null) || ObjectStore.isCurrentStatsValidForTheQuery( - conf, params, statsWriteId , writeIdString, false); - return getExistingStatsToUpdate(existingStats, params, isTxnValid); - } - - private List getExistingStatsToUpdate( - ColumnStatistics existingStats, Map params, boolean isTxnValid) { - boolean hasAnyAccurate = isTxnValid && StatsSetupConst.areBasicStatsUptoDate(params); + private List getExistingStatsToUpdate(ColumnStatistics existingStats, + String writeIdString, long statsWriteId, boolean isBasicValid) throws MetaException { List colsToUpdate = new ArrayList<>(); for (ColumnStatisticsObj obj : existingStats.getStatsObj()) { String col = obj.getColName(); - if (!hasAnyAccurate || !StatsSetupConst.areColumnStatsUptoDate(params, col)) { + if (!isBasicValid) { + colsToUpdate.add(col); + continue; + } + // Unlike other paths, here we only pass in writeIdString == null for non-txn tables. + if ((writeIdString == null) || ObjectStore.isCurrentStatsValidForTheQuery( + conf, obj.isSetIsStatsCompliant() && obj.isIsStatsCompliant(), obj.getWriteId(), + writeIdString, false)) { colsToUpdate.add(col); } } return colsToUpdate; } - private List getAnyStatsToUpdate(String db, String tbl, List allCols, - Map params, long statsWriteId, String writeIdString) throws MetaException { + private List getAnyStatsToUpdate(ColumnStatistics existingStats, String db, String tbl, + List allCols, boolean areBasicStatsValid, long statsWriteId, String writeIdString) + throws MetaException { // Note: we only run "for columns" command and assume no basic stats means no col stats. 
- if (!StatsSetupConst.areBasicStatsUptoDate(params)) { + if (!areBasicStatsValid) { return allCols; } // TODO: we should probably skip updating if writeId is from an active txn if (writeIdString != null && !ObjectStore.isCurrentStatsValidForTheQuery( - conf, params, statsWriteId, writeIdString, false)) { + conf, areBasicStatsValid, statsWriteId, writeIdString, false)) { return allCols; } - List colsToUpdate = new ArrayList<>(); - for (String col : allCols) { - if (!StatsSetupConst.areColumnStatsUptoDate(params, col)) { - colsToUpdate.add(col); + HashSet colSet = null; + for (ColumnStatisticsObj obj : existingStats.getStatsObj()) { + if (ObjectStore.isCurrentStatsValidForTheQuery(conf, + obj.isSetIsStatsCompliant() && obj.isIsStatsCompliant(), statsWriteId, writeIdString, false)) { + if (colSet == null) { + colSet = new HashSet<>(allCols); + } + if (!colSet.remove(obj.getColName())) { + LOG.warn("Column " + obj.getColName() + " was not found in the set"); + } } } - return colsToUpdate; + + return colSet == null ? 
allCols : new ArrayList<>(colSet); } private List getTablesToCheck() throws MetaException, NoSuchObjectException { @@ -559,9 +595,9 @@ private static String makeFullPartName(TableName tableName, String partName) { private final static class AnalyzeWork { TableName tableName; String partName, allParts; - List cols; + Collection cols; - public AnalyzeWork(TableName tableName, String partName, String allParts, List cols) { + public AnalyzeWork(TableName tableName, String partName, String allParts, Collection cols) { this.tableName = tableName; this.partName = partName; this.allParts = allParts; diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index ae51b4db51..b70c71d19b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -128,7 +128,6 @@ /** * Collect table, partition and column level statistics - * Note: DOES NOT CHECK txn stats. * @param conf * - hive configuration * @param partList @@ -227,7 +226,6 @@ private static void estimateStatsForMissingCols(List neededColumns, List } } - /** Note: DOES NOT CHECK txn stats. */ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, Table table, List schema, List neededColumns, ColumnStatsList colStatsCache, List referencedColumns, boolean fetchColStats) @@ -266,10 +264,7 @@ private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList p long numErasureCodedFiles = getErasureCodedFiles(table); if (fetchColStats) { - // Note: this is currently called from two notable places (w/false for checkTxn) - // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics - // 2) RelOptHiveTable via getColStats and updateColStats. 
- colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache, false); + colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache); if(colStats == null) { colStats = Lists.newArrayList(); } @@ -389,11 +384,8 @@ private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList p // size is 0, aggrStats is null after several retries. Thus, we can // skip the step to connect to the metastore. if (neededColsToRetrieve.size() > 0 && partNames.size() > 0) { - // Note: this is currently called from two notable places (w/false for checkTxn) - // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics - // 2) RelOptHiveTable via getColStats and updateColStats. aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(), - neededColsToRetrieve, partNames, false); + neededColsToRetrieve, partNames, true); } boolean statsRetrieved = aggrStats != null && @@ -598,6 +590,7 @@ public static ColStatistics getColStatsForPartCol(ColumnInfo ci,PartitionIterabl ci.getObjectInspector(), partCS.getColumnType())); partCS.setRange(getRangePartitionColumn(partList, ci.getInternalName(), ci.getType().getTypeName(), conf.getVar(ConfVars.DEFAULTPARTITIONNAME))); + partCS.setIsAccurate(true); // We have all the values so that would be accurate. 
return partCS; } @@ -843,6 +836,7 @@ public static ColStatistics getColStatistics(ColumnStatisticsObj cso, String tab String colName) { String colTypeLowerCase = cso.getColType().toLowerCase(); ColStatistics cs = new ColStatistics(colName, colTypeLowerCase); + cs.setIsAccurate(cso.isSetIsStatsCompliant() && cso.isIsStatsCompliant()); ColumnStatisticsData csd = cso.getStatsData(); if (colTypeLowerCase.equals(serdeConstants.TINYINT_TYPE_NAME) || colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME) @@ -926,6 +920,7 @@ private static ColStatistics estimateColStats(long numRows, String colName, Hive ColumnInfo cinfo = getColumnInfoForColumn(colName, schema); ColStatistics cs = new ColStatistics(colName, cinfo.getTypeName()); cs.setIsEstimated(true); + cs.setIsAccurate(false); String colTypeLowerCase = cinfo.getTypeName().toLowerCase(); @@ -1004,7 +999,7 @@ else if(colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME)){ */ public static List getTableColumnStats( Table table, List schema, List neededColumns, - ColumnStatsList colStatsCache, boolean checkTransactional) { + ColumnStatsList colStatsCache) { if (table.isMaterializedTable()) { LOG.debug("Materialized table does not contain table statistics"); return null; @@ -1033,7 +1028,7 @@ else if(colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME)){ List stats = null; try { List colStat = Hive.get().getTableColumnStatistics( - dbName, tabName, colStatsToRetrieve, checkTransactional); + dbName, tabName, colStatsToRetrieve, true); stats = convertColStats(colStat, tabName); } catch (HiveException e) { LOG.error("Failed to retrieve table statistics: ", e); @@ -1507,7 +1502,9 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis long numNulls = 0; ObjectInspector oi = end.getWritableObjectInspector(); long numRows = parentStats.getNumRows(); - + // We are getting expression statistics that are always valid; the parent-derived stats + // derive the accuracy from the parent. 
For most cases, set accurate to true. + boolean isAccurate = false; if (end instanceof ExprNodeColumnDesc) { // column projection ExprNodeColumnDesc encd = (ExprNodeColumnDesc) end; @@ -1546,6 +1543,7 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis } else { countDistincts = 1; } + isAccurate = true; } else if (end instanceof ExprNodeGenericFuncDesc) { ExprNodeGenericFuncDesc engfd = (ExprNodeGenericFuncDesc) end; colName = engfd.getName(); @@ -1565,6 +1563,7 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis return newStats; } } + isAccurate = false; // fallback to default countDistincts = getNDVFor(engfd, numRows, parentStats); @@ -1575,6 +1574,7 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis colName = Joiner.on(",").join(encd.getCols()); colType = serdeConstants.LIST_TYPE_NAME; countDistincts = numRows; + isAccurate = true; } else if (end instanceof ExprNodeFieldDesc) { // field within complex type @@ -1582,6 +1582,7 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis colName = enfd.getFieldName(); colType = enfd.getTypeString(); countDistincts = numRows; + isAccurate = true; } else { throw new IllegalArgumentException("not supported expr type " + end.getClass()); } @@ -1589,6 +1590,8 @@ public static ColStatistics getColStatisticsFromExpression(HiveConf conf, Statis colType = colType.toLowerCase(); avgColSize = getAvgColLenOf(conf, oi, colType); ColStatistics colStats = new ColStatistics(colName, colType); + + colStats.setIsAccurate(isAccurate); colStats.setAvgColLen(avgColSize); colStats.setCountDistint(countDistincts); colStats.setNumNulls(numNulls); @@ -1922,18 +1925,28 @@ public static boolean areBasicStatsUptoDateForQueryAnswering(Table table, Map params, String colName) { + public static boolean areColumnStatsUptoDateForQueryAnswering(Table table, ColumnStatisticsObj cso) { + return 
areColumnStatsUptoDateForQueryAnswering(table, + cso.isSetIsStatsCompliant() && cso.isIsStatsCompliant()); + } + + /** + * Are the column stats for the table up-to-date for query planning. + * Can run additional checks compared to the version in StatsSetupConst. + */ + public static boolean areColumnStatsUptoDateForQueryAnswering(Table table, boolean isAccurate) { // HIVE-19332: external tables should not be considered to have up-to-date stats. if (MetaStoreUtils.isExternalTable(table.getTTable())) { return false; } - return StatsSetupConst.areColumnStatsUptoDate(params, colName); + + return isAccurate; } } diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java index 183f1279ad..4cfdaf5e07 100644 --- ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java @@ -92,7 +92,7 @@ public void before() { builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock); - StatsSetupConst.setStatsStateForCreateTable(tableParams, Lists.newArrayList("_int"), StatsSetupConst.TRUE); + StatsSetupConst.setStatsStateForCreateTable(tableParams); tableParams.put(StatsSetupConst.ROW_COUNT, "3"); } diff --git ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index 55131f3c2b..202fb4997c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hive.ql.stats; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; 
import java.io.File; import java.util.ArrayList; @@ -28,23 +30,24 @@ import org.apache.curator.shaded.com.google.common.collect.Lists; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.ql.DriverUtils; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -198,7 +201,7 @@ public void testTxnTable() throws Exception { badWriteId = msClient.allocateTableWriteId(badTxnId, dbName, tblName); tbl = msClient.getTable(dbName, tblName); tbl.setWriteId(badWriteId); - StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); + tbl.setIsStatsCompliant(false); msClient.alter_table(null, dbName, tblName, tbl, new EnvironmentContext(), initialWriteIds.toString()); // Stats should not be valid. 
@@ -545,68 +548,69 @@ private void setPartitionSkipProperty( private void verifyAndUnsetColStats( String tblName, List cols, IMetaStoreClient msClient) throws Exception { Table tbl = msClient.getTable(ss.getCurrentDatabase(), tblName); - verifyAndUnsetColStatsVal(tbl.getParameters(), cols); - EnvironmentContext ec = new EnvironmentContext(); - // Make sure metastore doesn't mess with our bogus stats updates. - ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - msClient.alter_table_with_environmentContext(tbl.getDbName(), tbl.getTableName(), tbl, ec); + verifyStats(cols, msClient, tbl, true, true, null); + long txnId = msClient.openTxn("test"); + long writeId = msClient.allocateTableWriteId(txnId, tbl.getDbName(), tbl.getTableName()); + msClient.invalidateAllColumnStats(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), null, writeId); + msClient.commitTxn(txnId); // Double-check. tbl = msClient.getTable(ss.getCurrentDatabase(), tblName); - for (String col : cols) { - assertFalse(StatsSetupConst.areColumnStatsUptoDate(tbl.getParameters(), col)); - } + verifyStats(cols, msClient, tbl, true, false, null); } - private void verifyAndUnsetColStatsVal(Map params, List cols) { - assertTrue(StatsSetupConst.areBasicStatsUptoDate(params)); - for (String col : cols) { - assertTrue(StatsSetupConst.areColumnStatsUptoDate(params, col)); + private void verifyStats(List cols, IMetaStoreClient msClient, Table tbl, + boolean basicState, boolean colState, String validWriteIds) throws NoSuchObjectException, MetaException, TException { + assertEquals(basicState, tbl.isIsStatsCompliant()); + List stats = msClient.getTableColumnStatistics( + tbl.getDbName(), tbl.getTableName(), cols, validWriteIds); + for (ColumnStatisticsObj obj : stats) { + assertEquals(colState, obj.isIsStatsCompliant()); } - StatsSetupConst.removeColumnStatsState(params, cols); - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); } - private void 
verifyAndUnsetColStats(String tblName, String partName, List cols, - IMetaStoreClient msClient) throws Exception { + private void verifyAndUnsetColStats( + String tblName, String partName, List cols, IMetaStoreClient msClient) throws Exception { Partition part = msClient.getPartition(ss.getCurrentDatabase(), tblName, partName); - verifyAndUnsetColStatsVal(part.getParameters(), cols); - EnvironmentContext ec = new EnvironmentContext(); - // Make sure metastore doesn't mess with our bogus stats updates. - ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - msClient.alter_partition(part.getCatName(), part.getDbName(), tblName, part, ec); + verifyStats(cols, msClient, part, partName, true, true, null); + long txnId = msClient.openTxn("test"); + long writeId = msClient.allocateTableWriteId(txnId, part.getDbName(), part.getTableName()); + msClient.invalidateAllColumnStats( + part.getCatName(), part.getDbName(), part.getTableName(), partName, writeId); + msClient.commitTxn(txnId); // Double-check. 
part = msClient.getPartition(ss.getCurrentDatabase(), tblName, partName); - for (String col : cols) { - assertFalse(StatsSetupConst.areColumnStatsUptoDate(part.getParameters(), col)); + verifyStats(cols, msClient, part, partName, true, false, null); + } + + private void verifyStats(List cols, IMetaStoreClient msClient, Partition part, String partName, + boolean basicState, boolean colState, String validWriteIds) throws NoSuchObjectException, MetaException, TException { + assertEquals(basicState, part.isIsStatsCompliant()); + Map> statsMap = msClient.getPartitionColumnStatistics( + part.getCatName(), part.getDbName(), part.getTableName(), + Lists.newArrayList(partName), cols, validWriteIds); + List stats = statsMap.values().iterator().next(); + for (ColumnStatisticsObj obj : stats) { + assertEquals(colState, obj.isIsStatsCompliant()); } } + private void verifyStatsUpToDate(String tbl, List cols, IMetaStoreClient msClient, boolean isUpToDate) throws Exception { Table table = msClient.getTable(ss.getCurrentDatabase(), tbl); - verifyStatsUpToDate(table.getParameters(), cols, isUpToDate); + verifyStats(cols, msClient, table, isUpToDate, isUpToDate, null); } private void verifyStatsUpToDate(String tbl, List cols, IMetaStoreClient msClient, String validWriteIds, boolean isUpToDate) throws Exception { Table table = msClient.getTable(ss.getCurrentCatalog(), ss.getCurrentDatabase(), tbl, validWriteIds); - verifyStatsUpToDate(table.getParameters(), cols, isUpToDate); - } - - private void verifyStatsUpToDate(Map params, List cols, - boolean isUpToDate) { - if (isUpToDate) { - assertTrue(StatsSetupConst.areBasicStatsUptoDate(params)); - } - for (String col : cols) { - assertEquals(isUpToDate, StatsSetupConst.areColumnStatsUptoDate(params, col)); - } + verifyStats(cols, msClient, table, isUpToDate, isUpToDate, validWriteIds); } private void verifyStatsUpToDate(String tbl, String part, ArrayList cols, IMetaStoreClient msClient, boolean isUpToDate) throws Exception { Partition 
partition = msClient.getPartition(ss.getCurrentDatabase(), tbl, part); - verifyStatsUpToDate(partition.getParameters(), cols, isUpToDate); + verifyStats(cols, msClient, partition, part, isUpToDate, isUpToDate, null); } private void executeQuery(String query) throws HiveException { diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java index 7dcfc170cc..b914888b1f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list594 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list594.size); - long _elem595; - for (int _i596 = 0; _i596 < _list594.size; ++_i596) + org.apache.thrift.protocol.TList _list604 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list604.size); + long _elem605; + for (int _i606 = 0; _i606 < _list604.size; ++_i606) { - _elem595 = iprot.readI64(); - struct.txn_ids.add(_elem595); + _elem605 = iprot.readI64(); + struct.txn_ids.add(_elem605); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnsRequest s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter597 : struct.txn_ids) + for (long _iter607 : struct.txn_ids) { - oprot.writeI64(_iter597); + oprot.writeI64(_iter607); } oprot.writeListEnd(); } @@ -410,9 
+410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter598 : struct.txn_ids) + for (long _iter608 : struct.txn_ids) { - oprot.writeI64(_iter598); + oprot.writeI64(_iter608); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list599.size); - long _elem600; - for (int _i601 = 0; _i601 < _list599.size; ++_i601) + org.apache.thrift.protocol.TList _list609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list609.size); + long _elem610; + for (int _i611 = 0; _i611 < _list609.size; ++_i611) { - _elem600 = iprot.readI64(); - struct.txn_ids.add(_elem600); + _elem610 = iprot.readI64(); + struct.txn_ids.add(_elem610); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java index 8ece410555..593f660e2f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddCheckConstraintR case 1: // CHECK_CONSTRAINT_COLS 
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list416 = iprot.readListBegin(); - struct.checkConstraintCols = new ArrayList(_list416.size); - SQLCheckConstraint _elem417; - for (int _i418 = 0; _i418 < _list416.size; ++_i418) + org.apache.thrift.protocol.TList _list426 = iprot.readListBegin(); + struct.checkConstraintCols = new ArrayList(_list426.size); + SQLCheckConstraint _elem427; + for (int _i428 = 0; _i428 < _list426.size; ++_i428) { - _elem417 = new SQLCheckConstraint(); - _elem417.read(iprot); - struct.checkConstraintCols.add(_elem417); + _elem427 = new SQLCheckConstraint(); + _elem427.read(iprot); + struct.checkConstraintCols.add(_elem427); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddCheckConstraint oprot.writeFieldBegin(CHECK_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraintCols.size())); - for (SQLCheckConstraint _iter419 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter429 : struct.checkConstraintCols) { - _iter419.write(oprot); + _iter429.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraintCols.size()); - for (SQLCheckConstraint _iter420 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter430 : struct.checkConstraintCols) { - _iter420.write(oprot); + _iter430.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR public void read(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list421 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraintCols = new ArrayList(_list421.size); - SQLCheckConstraint _elem422; - for (int _i423 = 0; _i423 < _list421.size; ++_i423) + org.apache.thrift.protocol.TList _list431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraintCols = new ArrayList(_list431.size); + SQLCheckConstraint _elem432; + for (int _i433 = 0; _i433 < _list431.size; ++_i433) { - _elem422 = new SQLCheckConstraint(); - _elem422.read(iprot); - struct.checkConstraintCols.add(_elem422); + _elem432 = new SQLCheckConstraint(); + _elem432.read(iprot); + struct.checkConstraintCols.add(_elem432); } } struct.setCheckConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java index 8a125d854e..0b6d8402ee 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDefaultConstrain case 1: // DEFAULT_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list408 = iprot.readListBegin(); - struct.defaultConstraintCols = new ArrayList(_list408.size); - SQLDefaultConstraint _elem409; - for (int _i410 = 0; _i410 < _list408.size; ++_i410) + org.apache.thrift.protocol.TList _list418 = iprot.readListBegin(); + struct.defaultConstraintCols = new ArrayList(_list418.size); + SQLDefaultConstraint _elem419; + for (int _i420 = 0; 
_i420 < _list418.size; ++_i420) { - _elem409 = new SQLDefaultConstraint(); - _elem409.read(iprot); - struct.defaultConstraintCols.add(_elem409); + _elem419 = new SQLDefaultConstraint(); + _elem419.read(iprot); + struct.defaultConstraintCols.add(_elem419); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDefaultConstrai oprot.writeFieldBegin(DEFAULT_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraintCols.size())); - for (SQLDefaultConstraint _iter411 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter421 : struct.defaultConstraintCols) { - _iter411.write(oprot); + _iter421.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraintCols.size()); - for (SQLDefaultConstraint _iter412 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter422 : struct.defaultConstraintCols) { - _iter412.write(oprot); + _iter422.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraintCols = new ArrayList(_list413.size); - SQLDefaultConstraint _elem414; - for (int _i415 = 0; _i415 < _list413.size; ++_i415) + org.apache.thrift.protocol.TList _list423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraintCols = new ArrayList(_list423.size); + SQLDefaultConstraint 
_elem424; + for (int _i425 = 0; _i425 < _list423.size; ++_i425) { - _elem414 = new SQLDefaultConstraint(); - _elem414.read(iprot); - struct.defaultConstraintCols.add(_elem414); + _elem424 = new SQLDefaultConstraint(); + _elem424.read(iprot); + struct.defaultConstraintCols.add(_elem424); } } struct.setDefaultConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 45618e781c..333b1ee477 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list716.size); - String _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list726 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list726.size); + String _elem727; + for (int _i728 = 0; _i728 < _list726.size; ++_i728) { - _elem717 = iprot.readString(); - struct.partitionnames.add(_elem717); + _elem727 = iprot.readString(); + struct.partitionnames.add(_elem727); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter719 : 
struct.partitionnames) + for (String _iter729 : struct.partitionnames) { - oprot.writeString(_iter719); + oprot.writeString(_iter729); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter720 : struct.partitionnames) + for (String _iter730 : struct.partitionnames) { - oprot.writeString(_iter720); + oprot.writeString(_iter730); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list721.size); - String _elem722; - for (int _i723 = 0; _i723 < _list721.size; ++_i723) + org.apache.thrift.protocol.TList _list731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list731.size); + String _elem732; + for (int _i733 = 0; _i733 < _list731.size; ++_i733) { - _elem722 = iprot.readString(); - struct.partitionnames.add(_elem722); + _elem732 = iprot.readString(); + struct.partitionnames.add(_elem732); } } struct.setPartitionnamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java index 0f443d4f60..cea42b4303 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddForeignKeyReques case 1: // FOREIGN_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); - struct.foreignKeyCols = new ArrayList(_list384.size); - SQLForeignKey _elem385; - for (int _i386 = 0; _i386 < _list384.size; ++_i386) + org.apache.thrift.protocol.TList _list394 = iprot.readListBegin(); + struct.foreignKeyCols = new ArrayList(_list394.size); + SQLForeignKey _elem395; + for (int _i396 = 0; _i396 < _list394.size; ++_i396) { - _elem385 = new SQLForeignKey(); - _elem385.read(iprot); - struct.foreignKeyCols.add(_elem385); + _elem395 = new SQLForeignKey(); + _elem395.read(iprot); + struct.foreignKeyCols.add(_elem395); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddForeignKeyReque oprot.writeFieldBegin(FOREIGN_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeyCols.size())); - for (SQLForeignKey _iter387 : struct.foreignKeyCols) + for (SQLForeignKey _iter397 : struct.foreignKeyCols) { - _iter387.write(oprot); + _iter397.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeyCols.size()); - for (SQLForeignKey _iter388 : struct.foreignKeyCols) + for (SQLForeignKey _iter398 : struct.foreignKeyCols) { - _iter388.write(oprot); + _iter398.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeyCols = new ArrayList(_list389.size); - SQLForeignKey _elem390; - for (int _i391 = 0; _i391 < _list389.size; ++_i391) + org.apache.thrift.protocol.TList _list399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeyCols = new ArrayList(_list399.size); + SQLForeignKey _elem400; + for (int _i401 = 0; _i401 < _list399.size; ++_i401) { - _elem390 = new SQLForeignKey(); - _elem390.read(iprot); - struct.foreignKeyCols.add(_elem390); + _elem400 = new SQLForeignKey(); + _elem400.read(iprot); + struct.foreignKeyCols.add(_elem400); } } struct.setForeignKeyColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java index 0266bba2b0..9c718e2c10 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddNotNullConstrain case 1: // NOT_NULL_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); - struct.notNullConstraintCols = new ArrayList(_list400.size); - SQLNotNullConstraint _elem401; - for (int _i402 = 0; _i402 < _list400.size; ++_i402) + org.apache.thrift.protocol.TList _list410 = iprot.readListBegin(); + struct.notNullConstraintCols = 
new ArrayList(_list410.size); + SQLNotNullConstraint _elem411; + for (int _i412 = 0; _i412 < _list410.size; ++_i412) { - _elem401 = new SQLNotNullConstraint(); - _elem401.read(iprot); - struct.notNullConstraintCols.add(_elem401); + _elem411 = new SQLNotNullConstraint(); + _elem411.read(iprot); + struct.notNullConstraintCols.add(_elem411); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddNotNullConstrai oprot.writeFieldBegin(NOT_NULL_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraintCols.size())); - for (SQLNotNullConstraint _iter403 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter413 : struct.notNullConstraintCols) { - _iter403.write(oprot); + _iter413.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraintCols.size()); - for (SQLNotNullConstraint _iter404 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter414 : struct.notNullConstraintCols) { - _iter404.write(oprot); + _iter414.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraintCols = new ArrayList(_list405.size); - SQLNotNullConstraint _elem406; - for (int _i407 = 0; _i407 < _list405.size; ++_i407) + org.apache.thrift.protocol.TList _list415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); 
+ struct.notNullConstraintCols = new ArrayList(_list415.size); + SQLNotNullConstraint _elem416; + for (int _i417 = 0; _i417 < _list415.size; ++_i417) { - _elem406 = new SQLNotNullConstraint(); - _elem406.read(iprot); - struct.notNullConstraintCols.add(_elem406); + _elem416 = new SQLNotNullConstraint(); + _elem416.read(iprot); + struct.notNullConstraintCols.add(_elem416); } } struct.setNotNullConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 469a9a8041..ba437ce6f6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -866,14 +866,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list490 = iprot.readListBegin(); - struct.parts = new ArrayList(_list490.size); - Partition _elem491; - for (int _i492 = 0; _i492 < _list490.size; ++_i492) + org.apache.thrift.protocol.TList _list500 = iprot.readListBegin(); + struct.parts = new ArrayList(_list500.size); + Partition _elem501; + for (int _i502 = 0; _i502 < _list500.size; ++_i502) { - _elem491 = new Partition(); - _elem491.read(iprot); - struct.parts.add(_elem491); + _elem501 = new Partition(); + _elem501.read(iprot); + struct.parts.add(_elem501); } iprot.readListEnd(); } @@ -941,9 +941,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter493 : struct.parts) + for (Partition _iter503 : struct.parts) { - _iter493.write(oprot); + _iter503.write(oprot); } oprot.writeListEnd(); } @@ -992,9 +992,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter494 : struct.parts) + for (Partition _iter504 : struct.parts) { - _iter494.write(oprot); + _iter504.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -1028,14 +1028,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list495.size); - Partition _elem496; - for (int _i497 = 0; _i497 < _list495.size; ++_i497) + org.apache.thrift.protocol.TList _list505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list505.size); + Partition _elem506; + for (int _i507 = 0; _i507 < _list505.size; ++_i507) { - _elem496 = new Partition(); - _elem496.read(iprot); - struct.parts.add(_elem496); + _elem506 = new Partition(); + _elem506.read(iprot); + struct.parts.add(_elem506); } } struct.setPartsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index a00af609d4..4318320bc9 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -426,14 +426,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list482 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list482.size); - Partition _elem483; - for (int _i484 = 0; _i484 < _list482.size; ++_i484) + org.apache.thrift.protocol.TList _list492 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list492.size); + Partition _elem493; + for (int _i494 = 0; _i494 < _list492.size; ++_i494) { - _elem483 = new Partition(); - _elem483.read(iprot); - struct.partitions.add(_elem483); + _elem493 = new Partition(); + _elem493.read(iprot); + struct.partitions.add(_elem493); } iprot.readListEnd(); } @@ -468,9 +468,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter485 : struct.partitions) + for (Partition _iter495 : struct.partitions) { - _iter485.write(oprot); + _iter495.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter486 : struct.partitions) + for (Partition _iter496 : struct.partitions) { - _iter486.write(oprot); + _iter496.write(oprot); } } } @@ -527,14 +527,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.partitions = new ArrayList(_list487.size); - Partition _elem488; - for (int _i489 = 0; _i489 < _list487.size; ++_i489) + org.apache.thrift.protocol.TList _list497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list497.size); + Partition _elem498; + for (int _i499 = 0; _i499 < _list497.size; ++_i499) { - _elem488 = new Partition(); - _elem488.read(iprot); - struct.partitions.add(_elem488); + _elem498 = new Partition(); + _elem498.read(iprot); + struct.partitions.add(_elem498); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java index 9069a419ec..c33daaadcd 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPrimaryKeyReques case 1: // PRIMARY_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list376 = iprot.readListBegin(); - struct.primaryKeyCols = new ArrayList(_list376.size); - SQLPrimaryKey _elem377; - for (int _i378 = 0; _i378 < _list376.size; ++_i378) + org.apache.thrift.protocol.TList _list386 = iprot.readListBegin(); + struct.primaryKeyCols = new ArrayList(_list386.size); + SQLPrimaryKey _elem387; + for (int _i388 = 0; _i388 < _list386.size; ++_i388) { - _elem377 = new SQLPrimaryKey(); - _elem377.read(iprot); - struct.primaryKeyCols.add(_elem377); + _elem387 = new SQLPrimaryKey(); + _elem387.read(iprot); + 
struct.primaryKeyCols.add(_elem387); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPrimaryKeyReque oprot.writeFieldBegin(PRIMARY_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeyCols.size())); - for (SQLPrimaryKey _iter379 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter389 : struct.primaryKeyCols) { - _iter379.write(oprot); + _iter389.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeyCols.size()); - for (SQLPrimaryKey _iter380 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter390 : struct.primaryKeyCols) { - _iter380.write(oprot); + _iter390.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeyCols = new ArrayList(_list381.size); - SQLPrimaryKey _elem382; - for (int _i383 = 0; _i383 < _list381.size; ++_i383) + org.apache.thrift.protocol.TList _list391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeyCols = new ArrayList(_list391.size); + SQLPrimaryKey _elem392; + for (int _i393 = 0; _i393 < _list391.size; ++_i393) { - _elem382 = new SQLPrimaryKey(); - _elem382.read(iprot); - struct.primaryKeyCols.add(_elem382); + _elem392 = new SQLPrimaryKey(); + _elem392.read(iprot); + struct.primaryKeyCols.add(_elem392); } } struct.setPrimaryKeyColsIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java index c47db4a910..4d32c24dc9 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddUniqueConstraint case 1: // UNIQUE_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); - struct.uniqueConstraintCols = new ArrayList(_list392.size); - SQLUniqueConstraint _elem393; - for (int _i394 = 0; _i394 < _list392.size; ++_i394) + org.apache.thrift.protocol.TList _list402 = iprot.readListBegin(); + struct.uniqueConstraintCols = new ArrayList(_list402.size); + SQLUniqueConstraint _elem403; + for (int _i404 = 0; _i404 < _list402.size; ++_i404) { - _elem393 = new SQLUniqueConstraint(); - _elem393.read(iprot); - struct.uniqueConstraintCols.add(_elem393); + _elem403 = new SQLUniqueConstraint(); + _elem403.read(iprot); + struct.uniqueConstraintCols.add(_elem403); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddUniqueConstrain oprot.writeFieldBegin(UNIQUE_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraintCols.size())); - for (SQLUniqueConstraint _iter395 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter405 : struct.uniqueConstraintCols) { - _iter395.write(oprot); + _iter405.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraintCols.size()); - for (SQLUniqueConstraint _iter396 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter406 : struct.uniqueConstraintCols) { - _iter396.write(oprot); + _iter406.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint public void read(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraintCols = new ArrayList(_list397.size); - SQLUniqueConstraint _elem398; - for (int _i399 = 0; _i399 < _list397.size; ++_i399) + org.apache.thrift.protocol.TList _list407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraintCols = new ArrayList(_list407.size); + SQLUniqueConstraint _elem408; + for (int _i409 = 0; _i409 < _list407.size; ++_i409) { - _elem398 = new SQLUniqueConstraint(); - _elem398.read(iprot); - struct.uniqueConstraintCols.add(_elem398); + _elem408 = new SQLUniqueConstraint(); + _elem408.read(iprot); + struct.uniqueConstraintCols.add(_elem408); } } struct.setUniqueConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index 67d9b10614..47bb863ad7 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -40,7 +40,6 @@ private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -50,13 +49,11 @@ private List colStats; // required private long partsFound; // required - private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_STATS((short)1, "colStats"), - PARTS_FOUND((short)2, "partsFound"), - IS_STATS_COMPLIANT((short)3, "isStatsCompliant"); + PARTS_FOUND((short)2, "partsFound"); private static final Map byName = new HashMap(); @@ -75,8 +72,6 @@ public static _Fields findByThriftId(int fieldId) { return COL_STATS; case 2: // PARTS_FOUND return PARTS_FOUND; - case 3: // IS_STATS_COMPLIANT - return IS_STATS_COMPLIANT; default: return null; } @@ -118,9 +113,7 @@ public String getFieldName() { // isset id assignments private static final int __PARTSFOUND_ISSET_ID = 0; - private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -129,8 +122,6 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap); } @@ -161,7 +152,6 @@ public AggrStats(AggrStats other) { this.colStats = __this__colStats; } this.partsFound = other.partsFound; - this.isStatsCompliant = 
other.isStatsCompliant; } public AggrStats deepCopy() { @@ -173,8 +163,6 @@ public void clear() { this.colStats = null; setPartsFoundIsSet(false); this.partsFound = 0; - setIsStatsCompliantIsSet(false); - this.isStatsCompliant = false; } public int getColStatsSize() { @@ -237,28 +225,6 @@ public void setPartsFoundIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value); } - public boolean isIsStatsCompliant() { - return this.isStatsCompliant; - } - - public void setIsStatsCompliant(boolean isStatsCompliant) { - this.isStatsCompliant = isStatsCompliant; - setIsStatsCompliantIsSet(true); - } - - public void unsetIsStatsCompliant() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ - public boolean isSetIsStatsCompliant() { - return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - public void setIsStatsCompliantIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_STATS: @@ -277,14 +243,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case IS_STATS_COMPLIANT: - if (value == null) { - unsetIsStatsCompliant(); - } else { - setIsStatsCompliant((Boolean)value); - } - break; - } } @@ -296,9 +254,6 @@ public Object getFieldValue(_Fields field) { case PARTS_FOUND: return getPartsFound(); - case IS_STATS_COMPLIANT: - return isIsStatsCompliant(); - } throw new IllegalStateException(); } @@ -314,8 +269,6 @@ public boolean isSet(_Fields field) { return isSetColStats(); case PARTS_FOUND: return isSetPartsFound(); - case IS_STATS_COMPLIANT: - return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -351,15 +304,6 @@ public boolean equals(AggrStats that) { 
return false; } - boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); - boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); - if (this_present_isStatsCompliant || that_present_isStatsCompliant) { - if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) - return false; - if (this.isStatsCompliant != that.isStatsCompliant) - return false; - } - return true; } @@ -377,11 +321,6 @@ public int hashCode() { if (present_partsFound) list.add(partsFound); - boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); - list.add(present_isStatsCompliant); - if (present_isStatsCompliant) - list.add(isStatsCompliant); - return list.hashCode(); } @@ -413,16 +352,6 @@ public int compareTo(AggrStats other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIsStatsCompliant()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -454,12 +383,6 @@ public String toString() { sb.append("partsFound:"); sb.append(this.partsFound); first = false; - if (isSetIsStatsCompliant()) { - if (!first) sb.append(", "); - sb.append("isStatsCompliant:"); - sb.append(this.isStatsCompliant); - first = false; - } sb.append(")"); return sb.toString(); } @@ -540,14 +463,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // IS_STATS_COMPLIANT - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -576,11 +491,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC); oprot.writeI64(struct.partsFound); oprot.writeFieldEnd(); - if (struct.isSetIsStatsCompliant()) { - oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); - oprot.writeBool(struct.isStatsCompliant); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -606,14 +516,6 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t } } oprot.writeI64(struct.partsFound); - BitSet optionals = new BitSet(); - if (struct.isSetIsStatsCompliant()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetIsStatsCompliant()) { - oprot.writeBool(struct.isStatsCompliant); - } } @Override @@ -633,11 +535,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) th struct.setColStatsIsSet(true); struct.partsFound = iprot.readI64(); struct.setPartsFoundIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 5fcb98fb1b..1fb1125bdd 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -716,13 +716,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 3: // TXN_IDS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); - struct.txnIds = new ArrayList(_list642.size); - long _elem643; - for (int _i644 = 0; _i644 < _list642.size; ++_i644) + org.apache.thrift.protocol.TList _list652 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list652.size); + long _elem653; + for (int _i654 = 0; _i654 < _list652.size; ++_i654) { - _elem643 = iprot.readI64(); - struct.txnIds.add(_elem643); + _elem653 = iprot.readI64(); + struct.txnIds.add(_elem653); } iprot.readListEnd(); } @@ -742,14 +742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 5: // SRC_TXN_TO_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list645 = iprot.readListBegin(); - struct.srcTxnToWriteIdList = new ArrayList(_list645.size); - TxnToWriteId _elem646; - for (int _i647 = 0; _i647 < _list645.size; ++_i647) + org.apache.thrift.protocol.TList _list655 = iprot.readListBegin(); + struct.srcTxnToWriteIdList = new ArrayList(_list655.size); + TxnToWriteId _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem646 = new TxnToWriteId(); - _elem646.read(iprot); - struct.srcTxnToWriteIdList.add(_elem646); + _elem656 = new TxnToWriteId(); + _elem656.read(iprot); + struct.srcTxnToWriteIdList.add(_elem656); } iprot.readListEnd(); } @@ -786,9 +786,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); - for (long _iter648 : struct.txnIds) + for (long _iter658 : struct.txnIds) { - oprot.writeI64(_iter648); + oprot.writeI64(_iter658); } oprot.writeListEnd(); } @@ -807,9 +807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite 
oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size())); - for (TxnToWriteId _iter649 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter659 : struct.srcTxnToWriteIdList) { - _iter649.write(oprot); + _iter659.write(oprot); } oprot.writeListEnd(); } @@ -849,9 +849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetTxnIds()) { { oprot.writeI32(struct.txnIds.size()); - for (long _iter650 : struct.txnIds) + for (long _iter660 : struct.txnIds) { - oprot.writeI64(_iter650); + oprot.writeI64(_iter660); } } } @@ -861,9 +861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetSrcTxnToWriteIdList()) { { oprot.writeI32(struct.srcTxnToWriteIdList.size()); - for (TxnToWriteId _iter651 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter661 : struct.srcTxnToWriteIdList) { - _iter651.write(oprot); + _iter661.write(oprot); } } } @@ -879,13 +879,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list652 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list652.size); - long _elem653; - for (int _i654 = 0; _i654 < _list652.size; ++_i654) + org.apache.thrift.protocol.TList _list662 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list662.size); + long _elem663; + for (int _i664 = 0; _i664 < _list662.size; ++_i664) { - _elem653 = iprot.readI64(); - struct.txnIds.add(_elem653); + _elem663 = iprot.readI64(); + struct.txnIds.add(_elem663); } } struct.setTxnIdsIsSet(true); @@ -896,14 +896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
AllocateTableWriteId } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.srcTxnToWriteIdList = new ArrayList(_list655.size); - TxnToWriteId _elem656; - for (int _i657 = 0; _i657 < _list655.size; ++_i657) + org.apache.thrift.protocol.TList _list665 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.srcTxnToWriteIdList = new ArrayList(_list665.size); + TxnToWriteId _elem666; + for (int _i667 = 0; _i667 < _list665.size; ++_i667) { - _elem656 = new TxnToWriteId(); - _elem656.read(iprot); - struct.srcTxnToWriteIdList.add(_elem656); + _elem666 = new TxnToWriteId(); + _elem666.read(iprot); + struct.srcTxnToWriteIdList.add(_elem666); } } struct.setSrcTxnToWriteIdListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index 2a13eba708..4d360bec78 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list658.size); - TxnToWriteId _elem659; - for (int _i660 = 0; _i660 < _list658.size; ++_i660) + org.apache.thrift.protocol.TList _list668 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list668.size); + 
TxnToWriteId _elem669; + for (int _i670 = 0; _i670 < _list668.size; ++_i670) { - _elem659 = new TxnToWriteId(); - _elem659.read(iprot); - struct.txnToWriteIds.add(_elem659); + _elem669 = new TxnToWriteId(); + _elem669.read(iprot); + struct.txnToWriteIds.add(_elem669); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter661 : struct.txnToWriteIds) + for (TxnToWriteId _iter671 : struct.txnToWriteIds) { - _iter661.write(oprot); + _iter671.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter662 : struct.txnToWriteIds) + for (TxnToWriteId _iter672 : struct.txnToWriteIds) { - _iter662.write(oprot); + _iter672.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list663.size); - TxnToWriteId _elem664; - for (int _i665 = 0; _i665 < _list663.size; ++_i665) + org.apache.thrift.protocol.TList _list673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list673.size); + TxnToWriteId _elem674; + for (int _i675 = 0; _i675 < _list673.size; ++_i675) { - _elem664 = new TxnToWriteId(); - 
_elem664.read(iprot); - struct.txnToWriteIds.add(_elem664); + _elem674 = new TxnToWriteId(); + _elem674.read(iprot); + struct.txnToWriteIds.add(_elem674); } } struct.setTxnToWriteIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index 4d4595a429..f2f6863c27 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list952.size); - Partition _elem953; - for (int _i954 = 0; _i954 < _list952.size; ++_i954) + org.apache.thrift.protocol.TList _list962 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list962.size); + Partition _elem963; + for (int _i964 = 0; _i964 < _list962.size; ++_i964) { - _elem953 = new Partition(); - _elem953.read(iprot); - struct.partitions.add(_elem953); + _elem963 = new Partition(); + _elem963.read(iprot); + struct.partitions.add(_elem963); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter955 : struct.partitions) + for (Partition _iter965 : struct.partitions) { - _iter955.write(oprot); + _iter965.write(oprot); } 
oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter956 : struct.partitions) + for (Partition _iter966 : struct.partitions) { - _iter956.write(oprot); + _iter966.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list957.size); - Partition _elem958; - for (int _i959 = 0; _i959 < _list957.size; ++_i959) + org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list967.size); + Partition _elem968; + for (int _i969 = 0; _i969 < _list967.size; ++_i969) { - _elem958 = new Partition(); - _elem958.read(iprot); - struct.partitions.add(_elem958); + _elem968 = new Partition(); + _elem968.read(iprot); + struct.partitions.add(_elem968); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java index 68582058ab..3c1fcaaf24 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
CheckConstraintsRes case 1: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list368 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list368.size); - SQLCheckConstraint _elem369; - for (int _i370 = 0; _i370 < _list368.size; ++_i370) + org.apache.thrift.protocol.TList _list378 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list378.size); + SQLCheckConstraint _elem379; + for (int _i380 = 0; _i380 < _list378.size; ++_i380) { - _elem369 = new SQLCheckConstraint(); - _elem369.read(iprot); - struct.checkConstraints.add(_elem369); + _elem379 = new SQLCheckConstraint(); + _elem379.read(iprot); + struct.checkConstraints.add(_elem379); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsRe oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter371 : struct.checkConstraints) + for (SQLCheckConstraint _iter381 : struct.checkConstraints) { - _iter371.write(oprot); + _iter381.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter372 : struct.checkConstraints) + for (SQLCheckConstraint _iter382 : struct.checkConstraints) { - _iter372.write(oprot); + _iter382.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list373 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list373.size); - SQLCheckConstraint _elem374; - for (int _i375 = 0; _i375 < _list373.size; ++_i375) + org.apache.thrift.protocol.TList _list383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list383.size); + SQLCheckConstraint _elem384; + for (int _i385 = 0; _i385 < _list383.size; ++_i385) { - _elem374 = new SQLCheckConstraint(); - _elem374.read(iprot); - struct.checkConstraints.add(_elem374); + _elem384 = new SQLCheckConstraint(); + _elem384.read(iprot); + struct.checkConstraints.add(_elem384); } } struct.setCheckConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index 3fdd295fb1..668b7d3f61 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list832 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list832.size); - long _elem833; - for (int _i834 = 0; _i834 < _list832.size; ++_i834) + org.apache.thrift.protocol.TList _list842 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list842.size); + long _elem843; + for (int _i844 = 0; _i844 < _list842.size; ++_i844) { - _elem833 = iprot.readI64(); - struct.fileIds.add(_elem833); + 
_elem843 = iprot.readI64(); + struct.fileIds.add(_elem843); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter835 : struct.fileIds) + for (long _iter845 : struct.fileIds) { - oprot.writeI64(_iter835); + oprot.writeI64(_iter845); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter836 : struct.fileIds) + for (long _iter846 : struct.fileIds) { - oprot.writeI64(_iter836); + oprot.writeI64(_iter846); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list837 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list837.size); - long _elem838; - for (int _i839 = 0; _i839 < _list837.size; ++_i839) + org.apache.thrift.protocol.TList _list847 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list847.size); + long _elem848; + for (int _i849 = 0; _i849 < _list847.size; ++_i849) { - _elem838 = iprot.readI64(); - struct.fileIds.add(_elem838); + _elem848 = iprot.readI64(); + struct.fileIds.add(_elem848); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index f5c9582fa9..75b9d31751 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.values = new ArrayList(_list848.size); - ClientCapability _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list858 = iprot.readListBegin(); + struct.values = new ArrayList(_list858.size); + ClientCapability _elem859; + for (int _i860 = 0; _i860 < _list858.size; ++_i860) { - _elem849 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem849); + _elem859 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem859); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter851 : struct.values) + for (ClientCapability _iter861 : struct.values) { - oprot.writeI32(_iter851.getValue()); + oprot.writeI32(_iter861.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter852 : 
struct.values) + for (ClientCapability _iter862 : struct.values) { - oprot.writeI32(_iter852.getValue()); + oprot.writeI32(_iter862.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list853.size); - ClientCapability _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list863 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list863.size); + ClientCapability _elem864; + for (int _i865 = 0; _i865 < _list863.size; ++_i865) { - _elem854 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem854); + _elem864 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem864); } } struct.setValuesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index fd4619f0e5..cb755007d6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -40,7 +40,6 @@ private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -50,13 +49,11 @@ private ColumnStatisticsDesc statsDesc; // required private List statsObj; // required - private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { STATS_DESC((short)1, "statsDesc"), - STATS_OBJ((short)2, "statsObj"), - IS_STATS_COMPLIANT((short)3, "isStatsCompliant"); + STATS_OBJ((short)2, "statsObj"); private static final Map byName = new HashMap(); @@ -75,8 +72,6 @@ public static _Fields findByThriftId(int fieldId) { return STATS_DESC; case 2: // STATS_OBJ return STATS_OBJ; - case 3: // IS_STATS_COMPLIANT - return IS_STATS_COMPLIANT; default: return null; } @@ -117,9 +112,6 @@ public String getFieldName() { } // isset id assignments - private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,8 +120,6 @@ public String getFieldName() { tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED, new 
org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); - tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap); } @@ -150,7 +140,6 @@ public ColumnStatistics( * Performs a deep copy on other. */ public ColumnStatistics(ColumnStatistics other) { - __isset_bitfield = other.__isset_bitfield; if (other.isSetStatsDesc()) { this.statsDesc = new ColumnStatisticsDesc(other.statsDesc); } @@ -161,7 +150,6 @@ public ColumnStatistics(ColumnStatistics other) { } this.statsObj = __this__statsObj; } - this.isStatsCompliant = other.isStatsCompliant; } public ColumnStatistics deepCopy() { @@ -172,8 +160,6 @@ public ColumnStatistics deepCopy() { public void clear() { this.statsDesc = null; this.statsObj = null; - setIsStatsCompliantIsSet(false); - this.isStatsCompliant = false; } public ColumnStatisticsDesc getStatsDesc() { @@ -237,28 +223,6 @@ public void setStatsObjIsSet(boolean value) { } } - public boolean isIsStatsCompliant() { - return this.isStatsCompliant; - } - - public void setIsStatsCompliant(boolean isStatsCompliant) { - this.isStatsCompliant = isStatsCompliant; - setIsStatsCompliantIsSet(true); - } - - public void unsetIsStatsCompliant() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ - public boolean isSetIsStatsCompliant() { - return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - public void 
setIsStatsCompliantIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_DESC: @@ -277,14 +241,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case IS_STATS_COMPLIANT: - if (value == null) { - unsetIsStatsCompliant(); - } else { - setIsStatsCompliant((Boolean)value); - } - break; - } } @@ -296,9 +252,6 @@ public Object getFieldValue(_Fields field) { case STATS_OBJ: return getStatsObj(); - case IS_STATS_COMPLIANT: - return isIsStatsCompliant(); - } throw new IllegalStateException(); } @@ -314,8 +267,6 @@ public boolean isSet(_Fields field) { return isSetStatsDesc(); case STATS_OBJ: return isSetStatsObj(); - case IS_STATS_COMPLIANT: - return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -351,15 +302,6 @@ public boolean equals(ColumnStatistics that) { return false; } - boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); - boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); - if (this_present_isStatsCompliant || that_present_isStatsCompliant) { - if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) - return false; - if (this.isStatsCompliant != that.isStatsCompliant) - return false; - } - return true; } @@ -377,11 +319,6 @@ public int hashCode() { if (present_statsObj) list.add(statsObj); - boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); - list.add(present_isStatsCompliant); - if (present_isStatsCompliant) - list.add(isStatsCompliant); - return list.hashCode(); } @@ -413,16 +350,6 @@ public int compareTo(ColumnStatistics other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIsStatsCompliant()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -458,12 +385,6 @@ public String toString() { sb.append(this.statsObj); } first = false; - if (isSetIsStatsCompliant()) { - if (!first) sb.append(", "); - sb.append("isStatsCompliant:"); - sb.append(this.isStatsCompliant); - first = false; - } sb.append(")"); return sb.toString(); } @@ -494,8 +415,6 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -548,14 +467,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // IS_STATS_COMPLIANT - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -586,11 +497,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s } oprot.writeFieldEnd(); } - if (struct.isSetIsStatsCompliant()) { - oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); - oprot.writeBool(struct.isStatsCompliant); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -616,14 +522,6 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
ColumnStatistics st _iter280.write(oprot); } } - BitSet optionals = new BitSet(); - if (struct.isSetIsStatsCompliant()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetIsStatsCompliant()) { - oprot.writeBool(struct.isStatsCompliant); - } } @Override @@ -644,11 +542,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str } } struct.setStatsObjIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java index 6f9a57fe53..9b3c6d378f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java @@ -41,6 +41,8 @@ private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("colName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField COL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("colType", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField STATS_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("statsData", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)4); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new 
org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +53,16 @@ private String colName; // required private String colType; // required private ColumnStatisticsData statsData; // required + private boolean isStatsCompliant; // optional + private long writeId; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { COL_NAME((short)1, "colName"), COL_TYPE((short)2, "colType"), - STATS_DATA((short)3, "statsData"); + STATS_DATA((short)3, "statsData"), + IS_STATS_COMPLIANT((short)4, "isStatsCompliant"), + WRITE_ID((short)5, "writeId"); private static final Map byName = new HashMap(); @@ -77,6 +83,10 @@ public static _Fields findByThriftId(int fieldId) { return COL_TYPE; case 3: // STATS_DATA return STATS_DATA; + case 4: // IS_STATS_COMPLIANT + return IS_STATS_COMPLIANT; + case 5: // WRITE_ID + return WRITE_ID; default: return null; } @@ -117,6 +127,10 @@ public String getFieldName() { } // isset id assignments + private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT,_Fields.WRITE_ID}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -126,6 +140,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.STATS_DATA, new org.apache.thrift.meta_data.FieldMetaData("statsData", org.apache.thrift.TFieldRequirementType.REQUIRED, new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsData.class))); + tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsObj.class, metaDataMap); } @@ -148,6 +166,7 @@ public ColumnStatisticsObj( * Performs a deep copy on other. */ public ColumnStatisticsObj(ColumnStatisticsObj other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetColName()) { this.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.colName); } @@ -157,6 +176,8 @@ public ColumnStatisticsObj(ColumnStatisticsObj other) { if (other.isSetStatsData()) { this.statsData = new ColumnStatisticsData(other.statsData); } + this.isStatsCompliant = other.isStatsCompliant; + this.writeId = other.writeId; } public ColumnStatisticsObj deepCopy() { @@ -168,6 +189,10 @@ public void clear() { this.colName = null; this.colType = null; this.statsData = null; + setIsStatsCompliantIsSet(false); + this.isStatsCompliant = false; + setWriteIdIsSet(false); + this.writeId = 0; } public String getColName() { @@ -239,6 +264,50 @@ public void setStatsDataIsSet(boolean value) { } } + public boolean isIsStatsCompliant() { + return this.isStatsCompliant; + } + + public void setIsStatsCompliant(boolean isStatsCompliant) { + this.isStatsCompliant = isStatsCompliant; + setIsStatsCompliantIsSet(true); + } + + public void unsetIsStatsCompliant() { + __isset_bitfield = 
EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ + public boolean isSetIsStatsCompliant() { + return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); + } + + public void setIsStatsCompliantIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COL_NAME: @@ -265,6 +334,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case IS_STATS_COMPLIANT: + if (value == null) { + unsetIsStatsCompliant(); + } else { + setIsStatsCompliant((Boolean)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + } } @@ -279,6 +364,12 @@ public Object getFieldValue(_Fields field) { case STATS_DATA: return getStatsData(); + case IS_STATS_COMPLIANT: + return isIsStatsCompliant(); + + case WRITE_ID: + return getWriteId(); + } throw new IllegalStateException(); } @@ -296,6 +387,10 @@ public boolean isSet(_Fields field) { return isSetColType(); case STATS_DATA: return isSetStatsData(); + case IS_STATS_COMPLIANT: + return isSetIsStatsCompliant(); + case WRITE_ID: + return isSetWriteId(); } 
throw new IllegalStateException(); } @@ -340,6 +435,24 @@ public boolean equals(ColumnStatisticsObj that) { return false; } + boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); + boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); + if (this_present_isStatsCompliant || that_present_isStatsCompliant) { + if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) + return false; + if (this.isStatsCompliant != that.isStatsCompliant) + return false; + } + + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + return true; } @@ -362,6 +475,16 @@ public int hashCode() { if (present_statsData) list.add(statsData); + boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); + list.add(present_isStatsCompliant); + if (present_isStatsCompliant) + list.add(isStatsCompliant); + + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + return list.hashCode(); } @@ -403,6 +526,26 @@ public int compareTo(ColumnStatisticsObj other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsStatsCompliant()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 
0) { + return lastComparison; + } + } return 0; } @@ -446,6 +589,18 @@ public String toString() { sb.append(this.statsData); } first = false; + if (isSetIsStatsCompliant()) { + if (!first) sb.append(", "); + sb.append("isStatsCompliant:"); + sb.append(this.isStatsCompliant); + first = false; + } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } sb.append(")"); return sb.toString(); } @@ -477,6 +632,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -526,6 +683,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatisticsObj org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // IS_STATS_COMPLIANT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -554,6 +727,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatisticsOb struct.statsData.write(oprot); oprot.writeFieldEnd(); } + if 
(struct.isSetIsStatsCompliant()) { + oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); + oprot.writeBool(struct.isStatsCompliant); + oprot.writeFieldEnd(); + } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -574,6 +757,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsObj oprot.writeString(struct.colName); oprot.writeString(struct.colType); struct.statsData.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetIsStatsCompliant()) { + optionals.set(0); + } + if (struct.isSetWriteId()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetIsStatsCompliant()) { + oprot.writeBool(struct.isStatsCompliant); + } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } } @Override @@ -586,6 +783,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsObj struct.statsData = new ColumnStatisticsData(); struct.statsData.read(iprot); struct.setStatsDataIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.isStatsCompliant = iprot.readBool(); + struct.setIsStatsCompliantIsSet(true); + } + if (incoming.get(1)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java index db47f9db8b..6b840198f1 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java @@ -533,14 +533,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, CommitTxnRequest st case 3: // WRITE_EVENT_INFOS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list602 = iprot.readListBegin(); - struct.writeEventInfos = new ArrayList(_list602.size); - WriteEventInfo _elem603; - for (int _i604 = 0; _i604 < _list602.size; ++_i604) + org.apache.thrift.protocol.TList _list612 = iprot.readListBegin(); + struct.writeEventInfos = new ArrayList(_list612.size); + WriteEventInfo _elem613; + for (int _i614 = 0; _i614 < _list612.size; ++_i614) { - _elem603 = new WriteEventInfo(); - _elem603.read(iprot); - struct.writeEventInfos.add(_elem603); + _elem613 = new WriteEventInfo(); + _elem613.read(iprot); + struct.writeEventInfos.add(_elem613); } iprot.readListEnd(); } @@ -577,9 +577,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CommitTxnRequest s oprot.writeFieldBegin(WRITE_EVENT_INFOS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.writeEventInfos.size())); - for (WriteEventInfo _iter605 : struct.writeEventInfos) + for (WriteEventInfo _iter615 : struct.writeEventInfos) { - _iter605.write(oprot); + _iter615.write(oprot); } oprot.writeListEnd(); } @@ -618,9 +618,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetWriteEventInfos()) { { oprot.writeI32(struct.writeEventInfos.size()); - for (WriteEventInfo _iter606 : struct.writeEventInfos) + for (WriteEventInfo _iter616 : struct.writeEventInfos) { - _iter606.write(oprot); + _iter616.write(oprot); } } } @@ -638,14 +638,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.writeEventInfos = new ArrayList(_list607.size); - WriteEventInfo _elem608; 
- for (int _i609 = 0; _i609 < _list607.size; ++_i609) + org.apache.thrift.protocol.TList _list617 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.writeEventInfos = new ArrayList(_list617.size); + WriteEventInfo _elem618; + for (int _i619 = 0; _i619 < _list617.size; ++_i619) { - _elem608 = new WriteEventInfo(); - _elem608.read(iprot); - struct.writeEventInfos.add(_elem608); + _elem618 = new WriteEventInfo(); + _elem618.read(iprot); + struct.writeEventInfos.add(_elem618); } } struct.setWriteEventInfosIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index dd6df744cb..50a4b5a134 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map698 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map698.size); - String _key699; - String _val700; - for (int _i701 = 0; _i701 < _map698.size; ++_i701) + org.apache.thrift.protocol.TMap _map708 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map708.size); + String _key709; + String _val710; + for (int _i711 = 0; _i711 < _map708.size; ++_i711) { - _key699 = iprot.readString(); - _val700 = iprot.readString(); - struct.properties.put(_key699, _val700); + _key709 = iprot.readString(); + _val710 = iprot.readString(); + struct.properties.put(_key709, _val710); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter702 : struct.properties.entrySet()) + for (Map.Entry _iter712 : struct.properties.entrySet()) { - oprot.writeString(_iter702.getKey()); - oprot.writeString(_iter702.getValue()); + oprot.writeString(_iter712.getKey()); + oprot.writeString(_iter712.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter703 : struct.properties.entrySet()) + for (Map.Entry _iter713 : struct.properties.entrySet()) { - oprot.writeString(_iter703.getKey()); - oprot.writeString(_iter703.getValue()); + oprot.writeString(_iter713.getKey()); + oprot.writeString(_iter713.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map704 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map704.size); - String _key705; - String _val706; - for (int _i707 = 0; _i707 < _map704.size; ++_i707) + org.apache.thrift.protocol.TMap _map714 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map714.size); + String _key715; + String _val716; + for (int _i717 = 0; _i717 < _map714.size; ++_i717) { - _key705 = iprot.readString(); - _val706 = iprot.readString(); - struct.properties.put(_key705, _val706); + _key715 = iprot.readString(); + _val716 = 
iprot.readString(); + struct.properties.put(_key715, _val716); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index d631e21cec..adf719097a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -792,13 +792,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st case 4: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set724 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set724.size); - String _elem725; - for (int _i726 = 0; _i726 < _set724.size; ++_i726) + org.apache.thrift.protocol.TSet _set734 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set734.size); + String _elem735; + for (int _i736 = 0; _i736 < _set734.size; ++_i736) { - _elem725 = iprot.readString(); - struct.tablesUsed.add(_elem725); + _elem735 = iprot.readString(); + struct.tablesUsed.add(_elem735); } iprot.readSetEnd(); } @@ -855,9 +855,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter727 : struct.tablesUsed) + for (String _iter737 : struct.tablesUsed) { - oprot.writeString(_iter727); + oprot.writeString(_iter737); } oprot.writeSetEnd(); } @@ -897,9 +897,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st oprot.writeString(struct.tblName); { 
oprot.writeI32(struct.tablesUsed.size()); - for (String _iter728 : struct.tablesUsed) + for (String _iter738 : struct.tablesUsed) { - oprot.writeString(_iter728); + oprot.writeString(_iter738); } } BitSet optionals = new BitSet(); @@ -928,13 +928,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata str struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set729 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set729.size); - String _elem730; - for (int _i731 = 0; _i731 < _set729.size; ++_i731) + org.apache.thrift.protocol.TSet _set739 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set739.size); + String _elem740; + for (int _i741 = 0; _i741 < _set739.size; ++_i741) { - _elem730 = iprot.readString(); - struct.tablesUsed.add(_elem730); + _elem740 = iprot.readString(); + struct.tablesUsed.add(_elem740); } } struct.setTablesUsedIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java index e29932c5ab..9b6b8bc56f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR case 1: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list360 = iprot.readListBegin(); - struct.defaultConstraints = 
new ArrayList(_list360.size); - SQLDefaultConstraint _elem361; - for (int _i362 = 0; _i362 < _list360.size; ++_i362) + org.apache.thrift.protocol.TList _list370 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list370.size); + SQLDefaultConstraint _elem371; + for (int _i372 = 0; _i372 < _list370.size; ++_i372) { - _elem361 = new SQLDefaultConstraint(); - _elem361.read(iprot); - struct.defaultConstraints.add(_elem361); + _elem371 = new SQLDefaultConstraint(); + _elem371.read(iprot); + struct.defaultConstraints.add(_elem371); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraints oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter363 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter373 : struct.defaultConstraints) { - _iter363.write(oprot); + _iter373.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter364 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter374 : struct.defaultConstraints) { - _iter364.write(oprot); + _iter374.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list365.size); - SQLDefaultConstraint _elem366; - for (int 
_i367 = 0; _i367 < _list365.size; ++_i367) + org.apache.thrift.protocol.TList _list375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list375.size); + SQLDefaultConstraint _elem376; + for (int _i377 = 0; _i377 < _list375.size; ++_i377) { - _elem366 = new SQLDefaultConstraint(); - _elem366.read(iprot); - struct.defaultConstraints.add(_elem366); + _elem376 = new SQLDefaultConstraint(); + _elem376.read(iprot); + struct.defaultConstraints.add(_elem376); } } struct.setDefaultConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index 0a7d3b5bca..f545017bfd 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list498 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list498.size); - Partition _elem499; - for (int _i500 = 0; _i500 < _list498.size; ++_i500) + org.apache.thrift.protocol.TList _list508 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list508.size); + Partition _elem509; + for (int _i510 = 0; _i510 < _list508.size; ++_i510) { - _elem499 = new Partition(); - _elem499.read(iprot); - struct.partitions.add(_elem499); + _elem509 = new Partition(); + _elem509.read(iprot); + struct.partitions.add(_elem509); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter501 : struct.partitions) + for (Partition _iter511 : struct.partitions) { - _iter501.write(oprot); + _iter511.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter502 : struct.partitions) + for (Partition _iter512 : struct.partitions) { - _iter502.write(oprot); + _iter512.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list503 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list503.size); - Partition _elem504; - for (int _i505 = 0; _i505 < _list503.size; ++_i505) + org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list513.size); + Partition _elem514; + for (int _i515 = 0; _i515 < _list513.size; ++_i515) { - _elem504 = new Partition(); - _elem504.read(iprot); - struct.partitions.add(_elem504); + _elem514 = new Partition(); + _elem514.read(iprot); + struct.partitions.add(_elem514); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index a128dacd89..fbe7154c5f 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -344,15 +344,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext case 1: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map318 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map318.size); - String _key319; - String _val320; - for (int _i321 = 0; _i321 < _map318.size; ++_i321) + org.apache.thrift.protocol.TMap _map328 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map328.size); + String _key329; + String _val330; + for (int _i331 = 0; _i331 < _map328.size; ++_i331) { - _key319 = iprot.readString(); - _val320 = iprot.readString(); - struct.properties.put(_key319, _val320); + _key329 = iprot.readString(); + _val330 = iprot.readString(); + struct.properties.put(_key329, _val330); } iprot.readMapEnd(); } @@ -378,10 +378,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter322 : struct.properties.entrySet()) + for (Map.Entry _iter332 : struct.properties.entrySet()) { - oprot.writeString(_iter322.getKey()); - oprot.writeString(_iter322.getValue()); + oprot.writeString(_iter332.getKey()); + oprot.writeString(_iter332.getValue()); } oprot.writeMapEnd(); } @@ -412,10 +412,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter323 : struct.properties.entrySet()) + for (Map.Entry _iter333 : 
struct.properties.entrySet()) { - oprot.writeString(_iter323.getKey()); - oprot.writeString(_iter323.getValue()); + oprot.writeString(_iter333.getKey()); + oprot.writeString(_iter333.getValue()); } } } @@ -427,15 +427,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext s BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map324 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map324.size); - String _key325; - String _val326; - for (int _i327 = 0; _i327 < _map324.size; ++_i327) + org.apache.thrift.protocol.TMap _map334 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map334.size); + String _key335; + String _val336; + for (int _i337 = 0; _i337 < _map334.size; ++_i337) { - _key325 = iprot.readString(); - _val326 = iprot.readString(); - struct.properties.put(_key325, _val326); + _key335 = iprot.readString(); + _val336 = iprot.readString(); + struct.properties.put(_key335, _val336); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 8f5b4e5bb4..bcff0f4957 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); - struct.schemaVersions = new ArrayList(_list944.size); - SchemaVersionDescriptor _elem945; - for (int _i946 = 0; _i946 < _list944.size; ++_i946) + org.apache.thrift.protocol.TList _list954 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list954.size); + SchemaVersionDescriptor _elem955; + for (int _i956 = 0; _i956 < _list954.size; ++_i956) { - _elem945 = new SchemaVersionDescriptor(); - _elem945.read(iprot); - struct.schemaVersions.add(_elem945); + _elem955 = new SchemaVersionDescriptor(); + _elem955.read(iprot); + struct.schemaVersions.add(_elem955); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter947 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter957 : struct.schemaVersions) { - _iter947.write(oprot); + _iter957.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter948 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter958 : struct.schemaVersions) { - _iter948.write(oprot); + _iter958.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list949.size); - SchemaVersionDescriptor _elem950; - for (int _i951 = 
0; _i951 < _list949.size; ++_i951) + org.apache.thrift.protocol.TList _list959 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list959.size); + SchemaVersionDescriptor _elem960; + for (int _i961 = 0; _i961 < _list959.size; ++_i961) { - _elem950 = new SchemaVersionDescriptor(); - _elem950.read(iprot); - struct.schemaVersions.add(_elem950); + _elem960 = new SchemaVersionDescriptor(); + _elem960.read(iprot); + struct.schemaVersions.add(_elem960); } } struct.setSchemaVersionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index dc2627a1fb..c0231a29f6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -794,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list764.size); - String _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list774 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list774.size); + String _elem775; + for (int _i776 = 0; _i776 < _list774.size; ++_i776) { - _elem765 = iprot.readString(); - struct.partitionVals.add(_elem765); + _elem775 = iprot.readString(); + struct.partitionVals.add(_elem775); } iprot.readListEnd(); } @@ -857,9 +857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s 
oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter767 : struct.partitionVals) + for (String _iter777 : struct.partitionVals) { - oprot.writeString(_iter767); + oprot.writeString(_iter777); } oprot.writeListEnd(); } @@ -915,9 +915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter768 : struct.partitionVals) + for (String _iter778 : struct.partitionVals) { - oprot.writeString(_iter768); + oprot.writeString(_iter778); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list769.size); - String _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list779 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list779.size); + String _elem780; + for (int _i781 = 0; _i781 < _list779.size; ++_i781) { - _elem770 = iprot.readString(); - struct.partitionVals.add(_elem770); + _elem780 = iprot.readString(); + struct.partitionVals.add(_elem780); } } struct.setPartitionValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java index 8fae31cba0..e9546c16e5 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysResponse case 1: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list336 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list336.size); - SQLForeignKey _elem337; - for (int _i338 = 0; _i338 < _list336.size; ++_i338) + org.apache.thrift.protocol.TList _list346 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list346.size); + SQLForeignKey _elem347; + for (int _i348 = 0; _i348 < _list346.size; ++_i348) { - _elem337 = new SQLForeignKey(); - _elem337.read(iprot); - struct.foreignKeys.add(_elem337); + _elem347 = new SQLForeignKey(); + _elem347.read(iprot); + struct.foreignKeys.add(_elem347); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRespons oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter339 : struct.foreignKeys) + for (SQLForeignKey _iter349 : struct.foreignKeys) { - _iter339.write(oprot); + _iter349.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter340 : struct.foreignKeys) + for (SQLForeignKey _iter350 : struct.foreignKeys) { - _iter340.write(oprot); + _iter350.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse public 
void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list341.size); - SQLForeignKey _elem342; - for (int _i343 = 0; _i343 < _list341.size; ++_i343) + org.apache.thrift.protocol.TList _list351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list351.size); + SQLForeignKey _elem352; + for (int _i353 = 0; _i353 < _list351.size; ++_i353) { - _elem342 = new SQLForeignKey(); - _elem342.read(iprot); - struct.foreignKeys.add(_elem342); + _elem352 = new SQLForeignKey(); + _elem352.read(iprot); + struct.foreignKeys.add(_elem352); } } struct.setForeignKeysIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index ce0feb929d..2b7a68d54f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -1079,14 +1079,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list554 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list554.size); - ResourceUri _elem555; - for (int _i556 = 0; _i556 < _list554.size; ++_i556) + org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list564.size); + 
ResourceUri _elem565; + for (int _i566 = 0; _i566 < _list564.size; ++_i566) { - _elem555 = new ResourceUri(); - _elem555.read(iprot); - struct.resourceUris.add(_elem555); + _elem565 = new ResourceUri(); + _elem565.read(iprot); + struct.resourceUris.add(_elem565); } iprot.readListEnd(); } @@ -1153,9 +1153,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter557 : struct.resourceUris) + for (ResourceUri _iter567 : struct.resourceUris) { - _iter557.write(oprot); + _iter567.write(oprot); } oprot.writeListEnd(); } @@ -1238,9 +1238,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter558 : struct.resourceUris) + for (ResourceUri _iter568 : struct.resourceUris) { - _iter558.write(oprot); + _iter568.write(oprot); } } } @@ -1283,14 +1283,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list559 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list559.size); - ResourceUri _elem560; - for (int _i561 = 0; _i561 < _list559.size; ++_i561) + org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourceUris = new ArrayList(_list569.size); + ResourceUri _elem570; + for (int _i571 = 0; _i571 < _list569.size; ++_i571) { - _elem560 = new ResourceUri(); - _elem560.read(iprot); - struct.resourceUris.add(_elem560); + _elem570 = new ResourceUri(); + _elem570.read(iprot); + struct.resourceUris.add(_elem570); } } 
struct.setResourceUrisIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 13fe5fa6d5..631abfed9d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list840 = iprot.readListBegin(); - struct.functions = new ArrayList(_list840.size); - Function _elem841; - for (int _i842 = 0; _i842 < _list840.size; ++_i842) + org.apache.thrift.protocol.TList _list850 = iprot.readListBegin(); + struct.functions = new ArrayList(_list850.size); + Function _elem851; + for (int _i852 = 0; _i852 < _list850.size; ++_i852) { - _elem841 = new Function(); - _elem841.read(iprot); - struct.functions.add(_elem841); + _elem851 = new Function(); + _elem851.read(iprot); + struct.functions.add(_elem851); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter843 : struct.functions) + for (Function _iter853 : struct.functions) { - _iter843.write(oprot); + _iter853.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { 
oprot.writeI32(struct.functions.size()); - for (Function _iter844 : struct.functions) + for (Function _iter854 : struct.functions) { - _iter844.write(oprot); + _iter854.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list845.size); - Function _elem846; - for (int _i847 = 0; _i847 < _list845.size; ++_i847) + org.apache.thrift.protocol.TList _list855 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list855.size); + Function _elem856; + for (int _i857 = 0; _i857 < _list855.size; ++_i857) { - _elem846 = new Function(); - _elem846.read(iprot); - struct.functions.add(_elem846); + _elem856 = new Function(); + _elem856.read(iprot); + struct.functions.add(_elem856); } } struct.setFunctionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 976bf001a0..3cb7793485 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list790 = iprot.readListBegin(); - struct.fileIds = new 
ArrayList(_list790.size); - long _elem791; - for (int _i792 = 0; _i792 < _list790.size; ++_i792) + org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list800.size); + long _elem801; + for (int _i802 = 0; _i802 < _list800.size; ++_i802) { - _elem791 = iprot.readI64(); - struct.fileIds.add(_elem791); + _elem801 = iprot.readI64(); + struct.fileIds.add(_elem801); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter793 : struct.fileIds) + for (long _iter803 : struct.fileIds) { - oprot.writeI64(_iter793); + oprot.writeI64(_iter803); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter794 : struct.fileIds) + for (long _iter804 : struct.fileIds) { - oprot.writeI64(_iter794); + oprot.writeI64(_iter804); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list795 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list795.size); - long _elem796; - for (int _i797 = 0; _i797 < _list795.size; ++_i797) + org.apache.thrift.protocol.TList _list805 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list805.size); + long _elem806; + for (int _i807 = 0; 
_i807 < _list805.size; ++_i807) { - _elem796 = iprot.readI64(); - struct.fileIds.add(_elem796); + _elem806 = iprot.readI64(); + struct.fileIds.add(_elem806); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 16a0113ac0..5237e12d1f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map780 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map780.size); - long _key781; - MetadataPpdResult _val782; - for (int _i783 = 0; _i783 < _map780.size; ++_i783) + org.apache.thrift.protocol.TMap _map790 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map790.size); + long _key791; + MetadataPpdResult _val792; + for (int _i793 = 0; _i793 < _map790.size; ++_i793) { - _key781 = iprot.readI64(); - _val782 = new MetadataPpdResult(); - _val782.read(iprot); - struct.metadata.put(_key781, _val782); + _key791 = iprot.readI64(); + _val792 = new MetadataPpdResult(); + _val792.read(iprot); + struct.metadata.put(_key791, _val792); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, 
struct.metadata.size())); - for (Map.Entry _iter784 : struct.metadata.entrySet()) + for (Map.Entry _iter794 : struct.metadata.entrySet()) { - oprot.writeI64(_iter784.getKey()); - _iter784.getValue().write(oprot); + oprot.writeI64(_iter794.getKey()); + _iter794.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter785 : struct.metadata.entrySet()) + for (Map.Entry _iter795 : struct.metadata.entrySet()) { - oprot.writeI64(_iter785.getKey()); - _iter785.getValue().write(oprot); + oprot.writeI64(_iter795.getKey()); + _iter795.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map786 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map786.size); - long _key787; - MetadataPpdResult _val788; - for (int _i789 = 0; _i789 < _map786.size; ++_i789) + org.apache.thrift.protocol.TMap _map796 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map796.size); + long _key797; + MetadataPpdResult _val798; + for (int _i799 = 0; _i799 < _map796.size; ++_i799) { - _key787 = iprot.readI64(); - _val788 = new MetadataPpdResult(); - _val788.read(iprot); - struct.metadata.put(_key787, _val788); + _key797 = iprot.readI64(); + _val798 = new MetadataPpdResult(); + _val798.read(iprot); + struct.metadata.put(_key797, _val798); } 
} struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 9e3ed8b282..ae3706e785 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list808 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list808.size); - long _elem809; - for (int _i810 = 0; _i810 < _list808.size; ++_i810) + org.apache.thrift.protocol.TList _list818 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list818.size); + long _elem819; + for (int _i820 = 0; _i820 < _list818.size; ++_i820) { - _elem809 = iprot.readI64(); - struct.fileIds.add(_elem809); + _elem819 = iprot.readI64(); + struct.fileIds.add(_elem819); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter811 : struct.fileIds) + for (long _iter821 : struct.fileIds) { - oprot.writeI64(_iter811); + oprot.writeI64(_iter821); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter812 : struct.fileIds) + for (long 
_iter822 : struct.fileIds) { - oprot.writeI64(_iter812); + oprot.writeI64(_iter822); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list813 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list813.size); - long _elem814; - for (int _i815 = 0; _i815 < _list813.size; ++_i815) + org.apache.thrift.protocol.TList _list823 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list823.size); + long _elem824; + for (int _i825 = 0; _i825 < _list823.size; ++_i825) { - _elem814 = iprot.readI64(); - struct.fileIds.add(_elem814); + _elem824 = iprot.readI64(); + struct.fileIds.add(_elem824); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index bc73f1ec27..f98f6261c6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map798 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map798.size); - long _key799; - ByteBuffer _val800; - for (int _i801 = 0; _i801 < 
_map798.size; ++_i801) + org.apache.thrift.protocol.TMap _map808 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map808.size); + long _key809; + ByteBuffer _val810; + for (int _i811 = 0; _i811 < _map808.size; ++_i811) { - _key799 = iprot.readI64(); - _val800 = iprot.readBinary(); - struct.metadata.put(_key799, _val800); + _key809 = iprot.readI64(); + _val810 = iprot.readBinary(); + struct.metadata.put(_key809, _val810); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter802 : struct.metadata.entrySet()) + for (Map.Entry _iter812 : struct.metadata.entrySet()) { - oprot.writeI64(_iter802.getKey()); - oprot.writeBinary(_iter802.getValue()); + oprot.writeI64(_iter812.getKey()); + oprot.writeBinary(_iter812.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter803 : struct.metadata.entrySet()) + for (Map.Entry _iter813 : struct.metadata.entrySet()) { - oprot.writeI64(_iter803.getKey()); - oprot.writeBinary(_iter803.getValue()); + oprot.writeI64(_iter813.getKey()); + oprot.writeBinary(_iter813.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map804 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, 
org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map804.size); - long _key805; - ByteBuffer _val806; - for (int _i807 = 0; _i807 < _map804.size; ++_i807) + org.apache.thrift.protocol.TMap _map814 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map814.size); + long _key815; + ByteBuffer _val816; + for (int _i817 = 0; _i817 < _map814.size; ++_i817) { - _key805 = iprot.readI64(); - _val806 = iprot.readBinary(); - struct.metadata.put(_key805, _val806); + _key815 = iprot.readI64(); + _val816 = iprot.readBinary(); + struct.metadata.put(_key815, _val816); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index 93ca303157..5a43aace42 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -447,14 +447,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list562 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list562.size); - TxnInfo _elem563; - for (int _i564 = 0; _i564 < _list562.size; ++_i564) + org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list572.size); + TxnInfo _elem573; + for (int _i574 = 0; _i574 < _list572.size; ++_i574) { - _elem563 = new TxnInfo(); - _elem563.read(iprot); - 
struct.open_txns.add(_elem563); + _elem573 = new TxnInfo(); + _elem573.read(iprot); + struct.open_txns.add(_elem573); } iprot.readListEnd(); } @@ -483,9 +483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size())); - for (TxnInfo _iter565 : struct.open_txns) + for (TxnInfo _iter575 : struct.open_txns) { - _iter565.write(oprot); + _iter575.write(oprot); } oprot.writeListEnd(); } @@ -511,9 +511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (TxnInfo _iter566 : struct.open_txns) + for (TxnInfo _iter576 : struct.open_txns) { - _iter566.write(oprot); + _iter576.write(oprot); } } } @@ -524,14 +524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.open_txns = new ArrayList(_list567.size); - TxnInfo _elem568; - for (int _i569 = 0; _i569 < _list567.size; ++_i569) + org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.open_txns = new ArrayList(_list577.size); + TxnInfo _elem578; + for (int _i579 = 0; _i579 < _list577.size; ++_i579) { - _elem568 = new TxnInfo(); - _elem568.read(iprot); - struct.open_txns.add(_elem568); + _elem578 = new TxnInfo(); + _elem578.read(iprot); + struct.open_txns.add(_elem578); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index c152a0aab7..93ccc0a000 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -615,13 +615,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list570 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list570.size); - long _elem571; - for (int _i572 = 0; _i572 < _list570.size; ++_i572) + org.apache.thrift.protocol.TList _list580 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list580.size); + long _elem581; + for (int _i582 = 0; _i582 < _list580.size; ++_i582) { - _elem571 = iprot.readI64(); - struct.open_txns.add(_elem571); + _elem581 = iprot.readI64(); + struct.open_txns.add(_elem581); } iprot.readListEnd(); } @@ -666,9 +666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter573 : struct.open_txns) + for (long _iter583 : struct.open_txns) { - oprot.writeI64(_iter573); + oprot.writeI64(_iter583); } oprot.writeListEnd(); } @@ -704,9 +704,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter574 : struct.open_txns) + for (long _iter584 : struct.open_txns) { - oprot.writeI64(_iter574); + oprot.writeI64(_iter584); } } oprot.writeBinary(struct.abortedBits); @@ -726,13 +726,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list575 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new ArrayList(_list575.size); - long _elem576; - for (int _i577 = 0; _i577 < _list575.size; ++_i577) + org.apache.thrift.protocol.TList _list585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new ArrayList(_list585.size); + long _elem586; + for (int _i587 = 0; _i587 < _list585.size; ++_i587) { - _elem576 = iprot.readI64(); - struct.open_txns.add(_elem576); + _elem586 = iprot.readI64(); + struct.open_txns.add(_elem586); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index f241b5aa79..5218597791 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -606,13 +606,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list856.size); - String _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list866 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list866.size); + String _elem867; + for (int _i868 = 0; _i868 < _list866.size; ++_i868) { - _elem857 = 
iprot.readString(); - struct.tblNames.add(_elem857); + _elem867 = iprot.readString(); + struct.tblNames.add(_elem867); } iprot.readListEnd(); } @@ -661,9 +661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter859 : struct.tblNames) + for (String _iter869 : struct.tblNames) { - oprot.writeString(_iter859); + oprot.writeString(_iter869); } oprot.writeListEnd(); } @@ -716,9 +716,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter860 : struct.tblNames) + for (String _iter870 : struct.tblNames) { - oprot.writeString(_iter860); + oprot.writeString(_iter870); } } } @@ -738,13 +738,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list861.size); - String _elem862; - for (int _i863 = 0; _i863 < _list861.size; ++_i863) + org.apache.thrift.protocol.TList _list871 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list871.size); + String _elem872; + for (int _i873 = 0; _i873 < _list871.size; ++_i873) { - _elem862 = iprot.readString(); - struct.tblNames.add(_elem862); + _elem872 = iprot.readString(); + struct.tblNames.add(_elem872); } } struct.setTblNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index b351c40f97..cbb0b651d5 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.tables = new ArrayList(_list864.size); - Table _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list874 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list874.size); + Table _elem875; + for (int _i876 = 0; _i876 < _list874.size; ++_i876) { - _elem865 = new Table(); - _elem865.read(iprot); - struct.tables.add(_elem865); + _elem875 = new Table(); + _elem875.read(iprot); + struct.tables.add(_elem875); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter867 : struct.tables) + for (Table _iter877 : struct.tables) { - _iter867.write(oprot); + _iter877.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter868 : struct.tables) + for (Table _iter878 : struct.tables) { - _iter868.write(oprot); + _iter878.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
(_list869.size); - Table _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list879 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList
(_list879.size); + Table _elem880; + for (int _i881 = 0; _i881 < _list879.size; ++_i881) { - _elem870 = new Table(); - _elem870.read(iprot); - struct.tables.add(_elem870); + _elem880 = new Table(); + _elem880.read(iprot); + struct.tables.add(_elem880); } } struct.setTablesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java index a5bbb86af1..0dabc74547 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java @@ -436,13 +436,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsReq case 1: // FULL_TABLE_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); - struct.fullTableNames = new ArrayList(_list618.size); - String _elem619; - for (int _i620 = 0; _i620 < _list618.size; ++_i620) + org.apache.thrift.protocol.TList _list628 = iprot.readListBegin(); + struct.fullTableNames = new ArrayList(_list628.size); + String _elem629; + for (int _i630 = 0; _i630 < _list628.size; ++_i630) { - _elem619 = iprot.readString(); - struct.fullTableNames.add(_elem619); + _elem629 = iprot.readString(); + struct.fullTableNames.add(_elem629); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(FULL_TABLE_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fullTableNames.size())); - for (String _iter621 : struct.fullTableNames) + for (String _iter631 : 
struct.fullTableNames) { - oprot.writeString(_iter621); + oprot.writeString(_iter631); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fullTableNames.size()); - for (String _iter622 : struct.fullTableNames) + for (String _iter632 : struct.fullTableNames) { - oprot.writeString(_iter622); + oprot.writeString(_iter632); } } oprot.writeString(struct.validTxnList); @@ -520,13 +520,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fullTableNames = new ArrayList(_list623.size); - String _elem624; - for (int _i625 = 0; _i625 < _list623.size; ++_i625) + org.apache.thrift.protocol.TList _list633 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fullTableNames = new ArrayList(_list633.size); + String _elem634; + for (int _i635 = 0; _i635 < _list633.size; ++_i635) { - _elem624 = iprot.readString(); - struct.fullTableNames.add(_elem624); + _elem634 = iprot.readString(); + struct.fullTableNames.add(_elem634); } } struct.setFullTableNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index 96a6a00572..a118953cca 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list634.size); - TableValidWriteIds _elem635; - for (int _i636 = 0; _i636 < _list634.size; ++_i636) + org.apache.thrift.protocol.TList _list644 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list644.size); + TableValidWriteIds _elem645; + for (int _i646 = 0; _i646 < _list644.size; ++_i646) { - _elem635 = new TableValidWriteIds(); - _elem635.read(iprot); - struct.tblValidWriteIds.add(_elem635); + _elem645 = new TableValidWriteIds(); + _elem645.read(iprot); + struct.tblValidWriteIds.add(_elem645); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter637 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter647 : struct.tblValidWriteIds) { - _iter637.write(oprot); + _iter647.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter638 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter648 : struct.tblValidWriteIds) { - _iter638.write(oprot); + _iter648.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void 
read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list639.size); - TableValidWriteIds _elem640; - for (int _i641 = 0; _i641 < _list639.size; ++_i641) + org.apache.thrift.protocol.TList _list649 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list649.size); + TableValidWriteIds _elem650; + for (int _i651 = 0; _i651 < _list649.size; ++_i651) { - _elem640 = new TableValidWriteIds(); - _elem640.read(iprot); - struct.tblValidWriteIds.add(_elem640); + _elem650 = new TableValidWriteIds(); + _elem650.read(iprot); + struct.tblValidWriteIds.add(_elem650); } } struct.setTblValidWriteIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index a6535aa8fc..3f7f9d08e3 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set682 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set682.size); - long _elem683; - for (int _i684 = 0; _i684 < _set682.size; ++_i684) + org.apache.thrift.protocol.TSet _set692 = 
iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set692.size); + long _elem693; + for (int _i694 = 0; _i694 < _set692.size; ++_i694) { - _elem683 = iprot.readI64(); - struct.aborted.add(_elem683); + _elem693 = iprot.readI64(); + struct.aborted.add(_elem693); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set685 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set685.size); - long _elem686; - for (int _i687 = 0; _i687 < _set685.size; ++_i687) + org.apache.thrift.protocol.TSet _set695 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set695.size); + long _elem696; + for (int _i697 = 0; _i697 < _set695.size; ++_i697) { - _elem686 = iprot.readI64(); - struct.nosuch.add(_elem686); + _elem696 = iprot.readI64(); + struct.nosuch.add(_elem696); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter688 : struct.aborted) + for (long _iter698 : struct.aborted) { - oprot.writeI64(_iter688); + oprot.writeI64(_iter698); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter689 : struct.nosuch) + for (long _iter699 : struct.nosuch) { - oprot.writeI64(_iter689); + oprot.writeI64(_iter699); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { 
oprot.writeI32(struct.aborted.size()); - for (long _iter690 : struct.aborted) + for (long _iter700 : struct.aborted) { - oprot.writeI64(_iter690); + oprot.writeI64(_iter700); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter691 : struct.nosuch) + for (long _iter701 : struct.nosuch) { - oprot.writeI64(_iter691); + oprot.writeI64(_iter701); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set692 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set692.size); - long _elem693; - for (int _i694 = 0; _i694 < _set692.size; ++_i694) + org.apache.thrift.protocol.TSet _set702 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set702.size); + long _elem703; + for (int _i704 = 0; _i704 < _set702.size; ++_i704) { - _elem693 = iprot.readI64(); - struct.aborted.add(_elem693); + _elem703 = iprot.readI64(); + struct.aborted.add(_elem703); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set695 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set695.size); - long _elem696; - for (int _i697 = 0; _i697 < _set695.size; ++_i697) + org.apache.thrift.protocol.TSet _set705 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set705.size); + long _elem706; + for (int _i707 = 0; _i707 < _set705.size; ++_i707) { - _elem696 = iprot.readI64(); - struct.nosuch.add(_elem696); + _elem706 = iprot.readI64(); + struct.nosuch.add(_elem706); } } struct.setNosuchIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 70690a4e0a..ee877dc98a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list740.size); - String _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list750 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list750.size); + String _elem751; + for (int _i752 = 0; _i752 < _list750.size; ++_i752) { - _elem741 = iprot.readString(); - struct.filesAdded.add(_elem741); + _elem751 = iprot.readString(); + struct.filesAdded.add(_elem751); } iprot.readListEnd(); } @@ -654,13 +654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list743 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list743.size); - String _elem744; - for (int _i745 = 0; _i745 < _list743.size; ++_i745) + org.apache.thrift.protocol.TList _list753 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list753.size); + String _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem744 = iprot.readString(); - struct.filesAddedChecksum.add(_elem744); + 
_elem754 = iprot.readString(); + struct.filesAddedChecksum.add(_elem754); } iprot.readListEnd(); } @@ -672,13 +672,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 4: // SUB_DIRECTORY_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); - struct.subDirectoryList = new ArrayList(_list746.size); - String _elem747; - for (int _i748 = 0; _i748 < _list746.size; ++_i748) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.subDirectoryList = new ArrayList(_list756.size); + String _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem747 = iprot.readString(); - struct.subDirectoryList.add(_elem747); + _elem757 = iprot.readString(); + struct.subDirectoryList.add(_elem757); } iprot.readListEnd(); } @@ -709,9 +709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter749 : struct.filesAdded) + for (String _iter759 : struct.filesAdded) { - oprot.writeString(_iter749); + oprot.writeString(_iter759); } oprot.writeListEnd(); } @@ -722,9 +722,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter750 : struct.filesAddedChecksum) + for (String _iter760 : struct.filesAddedChecksum) { - oprot.writeString(_iter750); + oprot.writeString(_iter760); } oprot.writeListEnd(); } @@ -736,9 +736,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size())); - for (String _iter751 : struct.subDirectoryList) + for (String _iter761 : struct.subDirectoryList) { - oprot.writeString(_iter751); + oprot.writeString(_iter761); } oprot.writeListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter752 : struct.filesAdded) + for (String _iter762 : struct.filesAdded) { - oprot.writeString(_iter752); + oprot.writeString(_iter762); } } BitSet optionals = new BitSet(); @@ -786,18 +786,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter753 : struct.filesAddedChecksum) + for (String _iter763 : struct.filesAddedChecksum) { - oprot.writeString(_iter753); + oprot.writeString(_iter763); } } } if (struct.isSetSubDirectoryList()) { { oprot.writeI32(struct.subDirectoryList.size()); - for (String _iter754 : struct.subDirectoryList) + for (String _iter764 : struct.subDirectoryList) { - oprot.writeString(_iter754); + oprot.writeString(_iter764); } } } @@ -807,13 +807,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list755 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list755.size); - String _elem756; - for (int _i757 = 0; _i757 < _list755.size; ++_i757) + org.apache.thrift.protocol.TList _list765 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.filesAdded = new ArrayList(_list765.size); + String _elem766; + for (int _i767 = 0; _i767 < _list765.size; ++_i767) { - _elem756 = iprot.readString(); - struct.filesAdded.add(_elem756); + _elem766 = iprot.readString(); + struct.filesAdded.add(_elem766); } } struct.setFilesAddedIsSet(true); @@ -824,26 +824,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list758 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list758.size); - String _elem759; - for (int _i760 = 0; _i760 < _list758.size; ++_i760) + org.apache.thrift.protocol.TList _list768 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list768.size); + String _elem769; + for (int _i770 = 0; _i770 < _list768.size; ++_i770) { - _elem759 = iprot.readString(); - struct.filesAddedChecksum.add(_elem759); + _elem769 = iprot.readString(); + struct.filesAddedChecksum.add(_elem769); } } struct.setFilesAddedChecksumIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.subDirectoryList = new ArrayList(_list761.size); - String _elem762; - for (int _i763 = 0; _i763 < _list761.size; ++_i763) + org.apache.thrift.protocol.TList _list771 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.subDirectoryList = new ArrayList(_list771.size); + String _elem772; + for (int _i773 = 0; _i773 < _list771.size; ++_i773) { - _elem762 = iprot.readString(); - struct.subDirectoryList.add(_elem762); + _elem772 = iprot.readString(); + struct.subDirectoryList.add(_elem772); } } struct.setSubDirectoryListIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsRequest.java new file mode 100644 index 0000000000..491eca0258 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsRequest.java @@ -0,0 +1,811 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidateColumnStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidateColumnStatsRequest"); + + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new InvalidateColumnStatsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new InvalidateColumnStatsRequestTupleSchemeFactory()); + } + + private String catName; // optional + private String dbName; // required + private String tableName; // required + private String partName; // optional + private long writeId; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "dbName"), + TABLE_NAME((short)3, "tableName"), + PART_NAME((short)4, "partName"), + WRITE_ID((short)5, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME + return DB_NAME; + case 3: // TABLE_NAME + return TABLE_NAME; + case 4: // PART_NAME + return PART_NAME; + case 5: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.PART_NAME,_Fields.WRITE_ID}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("partName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidateColumnStatsRequest.class, metaDataMap); + } + + public InvalidateColumnStatsRequest() { + this.writeId = -1L; + + } + + public InvalidateColumnStatsRequest( + String dbName, + String tableName) + { + this(); + this.dbName = dbName; + this.tableName = tableName; + } + + /** + * Performs a deep copy on other. + */ + public InvalidateColumnStatsRequest(InvalidateColumnStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + if (other.isSetPartName()) { + this.partName = other.partName; + } + this.writeId = other.writeId; + } + + public InvalidateColumnStatsRequest deepCopy() { + return new InvalidateColumnStatsRequest(this); + } + + @Override + public void clear() { + this.catName = null; + this.dbName = null; + this.tableName = null; + this.partName = null; + this.writeId = -1L; + + } + + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + 
return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public String getPartName() { + return this.partName; + } + + public void setPartName(String partName) { + this.partName = partName; + } + + public void unsetPartName() { + this.partName = null; + } + + /** Returns true if field partName is set (has been assigned a value) and false otherwise */ + public boolean isSetPartName() { + return this.partName != null; + } + + public void setPartNameIsSet(boolean value) { + if (!value) { + this.partName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; 
+ + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + case PART_NAME: + if (value == null) { + unsetPartName(); + } else { + setPartName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CAT_NAME: + return getCatName(); + + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + case PART_NAME: + return getPartName(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CAT_NAME: + return isSetCatName(); + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + case PART_NAME: + return isSetPartName(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof InvalidateColumnStatsRequest) + return this.equals((InvalidateColumnStatsRequest)that); + return false; + } + + public boolean equals(InvalidateColumnStatsRequest that) { + if (that == null) + return false; + + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if 
(!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_partName = true && this.isSetPartName(); + boolean that_present_partName = true && that.isSetPartName(); + if (this_present_partName || that_present_partName) { + if (!(this_present_partName && that_present_partName)) + return false; + if (!this.partName.equals(that.partName)) + return false; + } + + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_partName = true && (isSetPartName()); + list.add(present_partName); + if (present_partName) + list.add(partName); + + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(InvalidateColumnStatsRequest other) { + if (!getClass().equals(other.getClass())) { + return 
getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPartName()).compareTo(other.isSetPartName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partName, other.partName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("InvalidateColumnStatsRequest("); + boolean first = true; + + if (isSetCatName()) { + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } + if (!first) sb.append(", "); + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + if (isSetPartName()) { + if (!first) sb.append(", "); + sb.append("partName:"); + if (this.partName == null) { + sb.append("null"); + } else { + sb.append(this.partName); + } + first = false; + } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class InvalidateColumnStatsRequestStandardSchemeFactory implements SchemeFactory { + public InvalidateColumnStatsRequestStandardScheme getScheme() { + return new InvalidateColumnStatsRequestStandardScheme(); + } + } + + private static class InvalidateColumnStatsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidateColumnStatsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // PART_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.partName = iprot.readString(); + struct.setPartNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidateColumnStatsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + if (struct.partName != null) { + if (struct.isSetPartName()) { + oprot.writeFieldBegin(PART_NAME_FIELD_DESC); + oprot.writeString(struct.partName); + oprot.writeFieldEnd(); + } + } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + 
oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class InvalidateColumnStatsRequestTupleSchemeFactory implements SchemeFactory { + public InvalidateColumnStatsRequestTupleScheme getScheme() { + return new InvalidateColumnStatsRequestTupleScheme(); + } + } + + private static class InvalidateColumnStatsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, InvalidateColumnStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + if (struct.isSetPartName()) { + optionals.set(1); + } + if (struct.isSetWriteId()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } + if (struct.isSetPartName()) { + oprot.writeString(struct.partName); + } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, InvalidateColumnStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { + struct.partName = iprot.readString(); + struct.setPartNameIsSet(true); + } + if (incoming.get(2)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + } + +} + diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsResponse.java new file mode 100644 index 0000000000..0359b04793 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidateColumnStatsResponse.java @@ -0,0 +1,387 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidateColumnStatsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidateColumnStatsResponse"); + + private static final org.apache.thrift.protocol.TField RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("result", org.apache.thrift.protocol.TType.BOOL, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new InvalidateColumnStatsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new InvalidateColumnStatsResponseTupleSchemeFactory()); + } + + private boolean result; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESULT((short)1, "result"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESULT + return RESULT; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __RESULT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESULT, new org.apache.thrift.meta_data.FieldMetaData("result", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidateColumnStatsResponse.class, metaDataMap); + } + + public InvalidateColumnStatsResponse() { + } + + public InvalidateColumnStatsResponse( + boolean result) + { + this(); + this.result = result; + setResultIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public InvalidateColumnStatsResponse(InvalidateColumnStatsResponse other) { + __isset_bitfield = other.__isset_bitfield; + this.result = other.result; + } + + public InvalidateColumnStatsResponse deepCopy() { + return new InvalidateColumnStatsResponse(this); + } + + @Override + public void clear() { + setResultIsSet(false); + this.result = false; + } + + public boolean isResult() { + return this.result; + } + + public void setResult(boolean result) { + this.result = result; + setResultIsSet(true); + } + + public void unsetResult() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RESULT_ISSET_ID); + } + + /** Returns true if field result is set (has been assigned a value) and false otherwise */ + public boolean isSetResult() { + return EncodingUtils.testBit(__isset_bitfield, __RESULT_ISSET_ID); + } + + public void setResultIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RESULT_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESULT: + if (value == null) { + unsetResult(); + } else { + setResult((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESULT: + return isResult(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESULT: + return isSetResult(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof InvalidateColumnStatsResponse) + return this.equals((InvalidateColumnStatsResponse)that); + return false; + } + + public boolean equals(InvalidateColumnStatsResponse that) { + if (that == null) + return false; + + boolean this_present_result 
= true; + boolean that_present_result = true; + if (this_present_result || that_present_result) { + if (!(this_present_result && that_present_result)) + return false; + if (this.result != that.result) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_result = true; + list.add(present_result); + if (present_result) + list.add(result); + + return list.hashCode(); + } + + @Override + public int compareTo(InvalidateColumnStatsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResult()).compareTo(other.isSetResult()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResult()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.result, other.result); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("InvalidateColumnStatsResponse("); + boolean first = true; + + sb.append("result:"); + sb.append(this.result); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetResult()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'result' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class InvalidateColumnStatsResponseStandardSchemeFactory implements SchemeFactory { + public InvalidateColumnStatsResponseStandardScheme getScheme() { + return new InvalidateColumnStatsResponseStandardScheme(); + } + } + + private static class InvalidateColumnStatsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidateColumnStatsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESULT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.result = iprot.readBool(); + struct.setResultIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, InvalidateColumnStatsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(RESULT_FIELD_DESC); + oprot.writeBool(struct.result); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class InvalidateColumnStatsResponseTupleSchemeFactory implements SchemeFactory { + public InvalidateColumnStatsResponseTupleScheme getScheme() { + return new InvalidateColumnStatsResponseTupleScheme(); + } + } + + private static class InvalidateColumnStatsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, InvalidateColumnStatsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBool(struct.result); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, InvalidateColumnStatsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.result = iprot.readBool(); + struct.setResultIsSet(true); + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index cabed5af72..237d00b4a2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); - 
struct.component = new ArrayList(_list666.size); - LockComponent _elem667; - for (int _i668 = 0; _i668 < _list666.size; ++_i668) + org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); + struct.component = new ArrayList(_list676.size); + LockComponent _elem677; + for (int _i678 = 0; _i678 < _list676.size; ++_i678) { - _elem667 = new LockComponent(); - _elem667.read(iprot); - struct.component.add(_elem667); + _elem677 = new LockComponent(); + _elem677.read(iprot); + struct.component.add(_elem677); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter669 : struct.component) + for (LockComponent _iter679 : struct.component) { - _iter669.write(oprot); + _iter679.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter670 : struct.component) + for (LockComponent _iter680 : struct.component) { - _iter670.write(oprot); + _iter680.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list671.size); - LockComponent _elem672; - for (int _i673 = 0; _i673 < _list671.size; ++_i673) + org.apache.thrift.protocol.TList _list681 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list681.size); + LockComponent _elem682; + for (int _i683 = 0; _i683 < _list681.size; ++_i683) { - _elem672 = new LockComponent(); - _elem672.read(iprot); - struct.component.add(_elem672); + _elem682 = new LockComponent(); + _elem682.read(iprot); + struct.component.add(_elem682); } } struct.setComponentIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java index 54070adee3..bf2bbdd094 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR case 1: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list352 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list352.size); - SQLNotNullConstraint _elem353; - for (int _i354 = 0; _i354 < _list352.size; ++_i354) + org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list362.size); + SQLNotNullConstraint _elem363; + for (int _i364 = 0; _i364 < _list362.size; ++_i364) { - _elem353 = new SQLNotNullConstraint(); - _elem353.read(iprot); - struct.notNullConstraints.add(_elem353); + _elem363 = new SQLNotNullConstraint(); + _elem363.read(iprot); + struct.notNullConstraints.add(_elem363); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
NotNullConstraints oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter355 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter365 : struct.notNullConstraints) { - _iter355.write(oprot); + _iter365.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter356 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter366 : struct.notNullConstraints) { - _iter356.write(oprot); + _iter366.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list357.size); - SQLNotNullConstraint _elem358; - for (int _i359 = 0; _i359 < _list357.size; ++_i359) + org.apache.thrift.protocol.TList _list367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list367.size); + SQLNotNullConstraint _elem368; + for (int _i369 = 0; _i369 < _list367.size; ++_i369) { - _elem358 = new SQLNotNullConstraint(); - _elem358.read(iprot); - struct.notNullConstraints.add(_elem358); + _elem368 = new SQLNotNullConstraint(); + _elem368.read(iprot); + struct.notNullConstraints.add(_elem368); } } struct.setNotNullConstraintsIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index e86c9f6608..0801929fa7 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); - struct.events = new ArrayList(_list732.size); - NotificationEvent _elem733; - for (int _i734 = 0; _i734 < _list732.size; ++_i734) + org.apache.thrift.protocol.TList _list742 = iprot.readListBegin(); + struct.events = new ArrayList(_list742.size); + NotificationEvent _elem743; + for (int _i744 = 0; _i744 < _list742.size; ++_i744) { - _elem733 = new NotificationEvent(); - _elem733.read(iprot); - struct.events.add(_elem733); + _elem743 = new NotificationEvent(); + _elem743.read(iprot); + struct.events.add(_elem743); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter735 : struct.events) + for (NotificationEvent _iter745 : struct.events) { - _iter735.write(oprot); + _iter745.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { 
oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter736 : struct.events) + for (NotificationEvent _iter746 : struct.events) { - _iter736.write(oprot); + _iter746.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list737.size); - NotificationEvent _elem738; - for (int _i739 = 0; _i739 < _list737.size; ++_i739) + org.apache.thrift.protocol.TList _list747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list747.size); + NotificationEvent _elem748; + for (int _i749 = 0; _i749 < _list747.size; ++_i749) { - _elem738 = new NotificationEvent(); - _elem738.read(iprot); - struct.events.add(_elem738); + _elem748 = new NotificationEvent(); + _elem748.read(iprot); + struct.events.add(_elem748); } } struct.setEventsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java index 19b2c01b3a..57a96747b1 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java @@ -808,13 +808,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnRequest stru case 6: // REPL_SRC_TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list578 = iprot.readListBegin(); - struct.replSrcTxnIds = new ArrayList(_list578.size); - long _elem579; - for (int _i580 = 0; _i580 < _list578.size; ++_i580) + org.apache.thrift.protocol.TList _list588 = iprot.readListBegin(); + struct.replSrcTxnIds = new ArrayList(_list588.size); + long _elem589; + for (int _i590 = 0; _i590 < _list588.size; ++_i590) { - _elem579 = iprot.readI64(); - struct.replSrcTxnIds.add(_elem579); + _elem589 = iprot.readI64(); + struct.replSrcTxnIds.add(_elem589); } iprot.readListEnd(); } @@ -868,9 +868,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnRequest str oprot.writeFieldBegin(REPL_SRC_TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.replSrcTxnIds.size())); - for (long _iter581 : struct.replSrcTxnIds) + for (long _iter591 : struct.replSrcTxnIds) { - oprot.writeI64(_iter581); + oprot.writeI64(_iter591); } oprot.writeListEnd(); } @@ -917,9 +917,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest stru if (struct.isSetReplSrcTxnIds()) { { oprot.writeI32(struct.replSrcTxnIds.size()); - for (long _iter582 : struct.replSrcTxnIds) + for (long _iter592 : struct.replSrcTxnIds) { - oprot.writeI64(_iter582); + oprot.writeI64(_iter592); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest struc } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.replSrcTxnIds = new ArrayList(_list583.size); - long _elem584; - for (int _i585 = 0; _i585 < _list583.size; ++_i585) + org.apache.thrift.protocol.TList _list593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.replSrcTxnIds = new ArrayList(_list593.size); + long _elem594; + for (int _i595 = 0; _i595 < _list593.size; 
++_i595) { - _elem584 = iprot.readI64(); - struct.replSrcTxnIds.add(_elem584); + _elem594 = iprot.readI64(); + struct.replSrcTxnIds.add(_elem594); } } struct.setReplSrcTxnIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index 71a2c4fd80..73ca5e0ae3 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list586 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list586.size); - long _elem587; - for (int _i588 = 0; _i588 < _list586.size; ++_i588) + org.apache.thrift.protocol.TList _list596 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list596.size); + long _elem597; + for (int _i598 = 0; _i598 < _list596.size; ++_i598) { - _elem587 = iprot.readI64(); - struct.txn_ids.add(_elem587); + _elem597 = iprot.readI64(); + struct.txn_ids.add(_elem597); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter589 : struct.txn_ids) + for (long _iter599 : struct.txn_ids) { - oprot.writeI64(_iter589); + oprot.writeI64(_iter599); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st 
TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter590 : struct.txn_ids) + for (long _iter600 : struct.txn_ids) { - oprot.writeI64(_iter590); + oprot.writeI64(_iter600); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list591.size); - long _elem592; - for (int _i593 = 0; _i593 < _list591.size; ++_i593) + org.apache.thrift.protocol.TList _list601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list601.size); + long _elem602; + for (int _i603 = 0; _i603 < _list601.size; ++_i603) { - _elem592 = iprot.readI64(); - struct.txn_ids.add(_elem592); + _elem602 = iprot.readI64(); + struct.txn_ids.add(_elem602); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 8309769eee..30bdda649a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -1042,14 +1042,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 3: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList 
_list522 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list522.size); - FieldSchema _elem523; - for (int _i524 = 0; _i524 < _list522.size; ++_i524) + org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list532.size); + FieldSchema _elem533; + for (int _i534 = 0; _i534 < _list532.size; ++_i534) { - _elem523 = new FieldSchema(); - _elem523.read(iprot); - struct.partitionKeys.add(_elem523); + _elem533 = new FieldSchema(); + _elem533.read(iprot); + struct.partitionKeys.add(_elem533); } iprot.readListEnd(); } @@ -1077,14 +1077,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 6: // PARTITION_ORDER if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list525 = iprot.readListBegin(); - struct.partitionOrder = new ArrayList(_list525.size); - FieldSchema _elem526; - for (int _i527 = 0; _i527 < _list525.size; ++_i527) + org.apache.thrift.protocol.TList _list535 = iprot.readListBegin(); + struct.partitionOrder = new ArrayList(_list535.size); + FieldSchema _elem536; + for (int _i537 = 0; _i537 < _list535.size; ++_i537) { - _elem526 = new FieldSchema(); - _elem526.read(iprot); - struct.partitionOrder.add(_elem526); + _elem536 = new FieldSchema(); + _elem536.read(iprot); + struct.partitionOrder.add(_elem536); } iprot.readListEnd(); } @@ -1144,9 +1144,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter528 : struct.partitionKeys) + for (FieldSchema _iter538 : struct.partitionKeys) { - _iter528.write(oprot); + _iter538.write(oprot); } oprot.writeListEnd(); } @@ -1169,9 +1169,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq 
oprot.writeFieldBegin(PARTITION_ORDER_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionOrder.size())); - for (FieldSchema _iter529 : struct.partitionOrder) + for (FieldSchema _iter539 : struct.partitionOrder) { - _iter529.write(oprot); + _iter539.write(oprot); } oprot.writeListEnd(); } @@ -1216,9 +1216,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter530 : struct.partitionKeys) + for (FieldSchema _iter540 : struct.partitionKeys) { - _iter530.write(oprot); + _iter540.write(oprot); } } BitSet optionals = new BitSet(); @@ -1250,9 +1250,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetPartitionOrder()) { { oprot.writeI32(struct.partitionOrder.size()); - for (FieldSchema _iter531 : struct.partitionOrder) + for (FieldSchema _iter541 : struct.partitionOrder) { - _iter531.write(oprot); + _iter541.write(oprot); } } } @@ -1275,14 +1275,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list532 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list532.size); - FieldSchema _elem533; - for (int _i534 = 0; _i534 < _list532.size; ++_i534) + org.apache.thrift.protocol.TList _list542 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list542.size); + FieldSchema _elem543; + for (int _i544 = 0; _i544 < _list542.size; ++_i544) { - _elem533 = new FieldSchema(); - _elem533.read(iprot); - struct.partitionKeys.add(_elem533); + _elem543 = new FieldSchema(); + _elem543.read(iprot); + 
struct.partitionKeys.add(_elem543); } } struct.setPartitionKeysIsSet(true); @@ -1297,14 +1297,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionOrder = new ArrayList(_list535.size); - FieldSchema _elem536; - for (int _i537 = 0; _i537 < _list535.size; ++_i537) + org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionOrder = new ArrayList(_list545.size); + FieldSchema _elem546; + for (int _i547 = 0; _i547 < _list545.size; ++_i547) { - _elem536 = new FieldSchema(); - _elem536.read(iprot); - struct.partitionOrder.add(_elem536); + _elem546 = new FieldSchema(); + _elem546.read(iprot); + struct.partitionOrder.add(_elem546); } } struct.setPartitionOrderIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java index 7bf1c61a42..1cf7920315 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesResp case 1: // PARTITION_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list546 = iprot.readListBegin(); - struct.partitionValues = new ArrayList(_list546.size); - PartitionValuesRow _elem547; - for (int _i548 = 0; _i548 < _list546.size; ++_i548) + 
org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); + struct.partitionValues = new ArrayList(_list556.size); + PartitionValuesRow _elem557; + for (int _i558 = 0; _i558 < _list556.size; ++_i558) { - _elem547 = new PartitionValuesRow(); - _elem547.read(iprot); - struct.partitionValues.add(_elem547); + _elem557 = new PartitionValuesRow(); + _elem557.read(iprot); + struct.partitionValues.add(_elem557); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRes oprot.writeFieldBegin(PARTITION_VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionValues.size())); - for (PartitionValuesRow _iter549 : struct.partitionValues) + for (PartitionValuesRow _iter559 : struct.partitionValues) { - _iter549.write(oprot); + _iter559.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitionValues.size()); - for (PartitionValuesRow _iter550 : struct.partitionValues) + for (PartitionValuesRow _iter560 : struct.partitionValues) { - _iter550.write(oprot); + _iter560.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list551 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionValues = new ArrayList(_list551.size); - PartitionValuesRow _elem552; - for (int _i553 = 0; _i553 < _list551.size; ++_i553) + org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.partitionValues = new ArrayList(_list561.size); + PartitionValuesRow _elem562; + for (int _i563 = 0; _i563 < _list561.size; ++_i563) { - _elem552 = new PartitionValuesRow(); - _elem552.read(iprot); - struct.partitionValues.add(_elem552); + _elem562 = new PartitionValuesRow(); + _elem562.read(iprot); + struct.partitionValues.add(_elem562); } } struct.setPartitionValuesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java index 4c384f6408..dc36a1feef 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRow case 1: // ROW if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list538 = iprot.readListBegin(); - struct.row = new ArrayList(_list538.size); - String _elem539; - for (int _i540 = 0; _i540 < _list538.size; ++_i540) + org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); + struct.row = new ArrayList(_list548.size); + String _elem549; + for (int _i550 = 0; _i550 < _list548.size; ++_i550) { - _elem539 = iprot.readString(); - struct.row.add(_elem539); + _elem549 = iprot.readString(); + struct.row.add(_elem549); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRow oprot.writeFieldBegin(ROW_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.row.size())); - for (String _iter541 : struct.row) + for (String 
_iter551 : struct.row) { - oprot.writeString(_iter541); + oprot.writeString(_iter551); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.row.size()); - for (String _iter542 : struct.row) + for (String _iter552 : struct.row) { - oprot.writeString(_iter542); + oprot.writeString(_iter552); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.row = new ArrayList(_list543.size); - String _elem544; - for (int _i545 = 0; _i545 < _list543.size; ++_i545) + org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.row = new ArrayList(_list553.size); + String _elem554; + for (int _i555 = 0; _i555 < _list553.size; ++_i555) { - _elem544 = iprot.readString(); - struct.row.add(_elem544); + _elem554 = iprot.readString(); + struct.row.add(_elem554); } } struct.setRowIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index 27cdac609a..f6094a5627 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -439,14 +439,14 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list424 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list424.size); - Partition _elem425; - for (int _i426 = 0; _i426 < _list424.size; ++_i426) + org.apache.thrift.protocol.TList _list434 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list434.size); + Partition _elem435; + for (int _i436 = 0; _i436 < _list434.size; ++_i436) { - _elem425 = new Partition(); - _elem425.read(iprot); - struct.partitions.add(_elem425); + _elem435 = new Partition(); + _elem435.read(iprot); + struct.partitions.add(_elem435); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter427 : struct.partitions) + for (Partition _iter437 : struct.partitions) { - _iter427.write(oprot); + _iter437.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter428 : struct.partitions) + for (Partition _iter438 : struct.partitions) { - _iter428.write(oprot); + _iter438.write(oprot); } } oprot.writeBool(struct.hasUnknownPartitions); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list429 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list429.size); - Partition _elem430; - for (int _i431 = 0; _i431 < _list429.size; ++_i431) + org.apache.thrift.protocol.TList _list439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list439.size); + Partition _elem440; + for (int _i441 = 0; _i441 < _list439.size; ++_i441) { - _elem430 = new Partition(); - _elem430.read(iprot); - struct.partitions.add(_elem430); + _elem440 = new Partition(); + _elem440.read(iprot); + struct.partitions.add(_elem440); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 7e0842072f..429711a902 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -802,13 +802,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list466 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list466.size); - String _elem467; - for (int _i468 = 0; _i468 < _list466.size; ++_i468) + org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list476.size); + String _elem477; + for (int _i478 = 0; _i478 < _list476.size; ++_i478) { - _elem467 = iprot.readString(); - struct.colNames.add(_elem467); + _elem477 = iprot.readString(); + struct.colNames.add(_elem477); } 
iprot.readListEnd(); } @@ -820,13 +820,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 4: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list469 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list469.size); - String _elem470; - for (int _i471 = 0; _i471 < _list469.size; ++_i471) + org.apache.thrift.protocol.TList _list479 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list479.size); + String _elem480; + for (int _i481 = 0; _i481 < _list479.size; ++_i481) { - _elem470 = iprot.readString(); - struct.partNames.add(_elem470); + _elem480 = iprot.readString(); + struct.partNames.add(_elem480); } iprot.readListEnd(); } @@ -878,9 +878,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter472 : struct.colNames) + for (String _iter482 : struct.colNames) { - oprot.writeString(_iter472); + oprot.writeString(_iter482); } oprot.writeListEnd(); } @@ -890,9 +890,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter473 : struct.partNames) + for (String _iter483 : struct.partNames) { - oprot.writeString(_iter473); + oprot.writeString(_iter483); } oprot.writeListEnd(); } @@ -933,16 +933,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter474 : struct.colNames) + for (String _iter484 : struct.colNames) { - oprot.writeString(_iter474); + oprot.writeString(_iter484); } } { 
oprot.writeI32(struct.partNames.size()); - for (String _iter475 : struct.partNames) + for (String _iter485 : struct.partNames) { - oprot.writeString(_iter475); + oprot.writeString(_iter485); } } BitSet optionals = new BitSet(); @@ -969,24 +969,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list476 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list476.size); - String _elem477; - for (int _i478 = 0; _i478 < _list476.size; ++_i478) + org.apache.thrift.protocol.TList _list486 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list486.size); + String _elem487; + for (int _i488 = 0; _i488 < _list486.size; ++_i488) { - _elem477 = iprot.readString(); - struct.colNames.add(_elem477); + _elem487 = iprot.readString(); + struct.colNames.add(_elem487); } } struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list479.size); - String _elem480; - for (int _i481 = 0; _i481 < _list479.size; ++_i481) + org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list489.size); + String _elem490; + for (int _i491 = 0; _i491 < _list489.size; ++_i491) { - _elem480 = iprot.readString(); - struct.partNames.add(_elem480); + _elem490 = iprot.readString(); + struct.partNames.add(_elem490); } } struct.setPartNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index becfcc904d..6dcc9facae 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -39,7 +39,6 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult"); private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -48,12 +47,10 @@ } private Map> partStats; // required - private boolean isStatsCompliant; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PART_STATS((short)1, "partStats"), - IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); + PART_STATS((short)1, "partStats"); private static final Map byName = new HashMap(); @@ -70,8 +67,6 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // PART_STATS return PART_STATS; - case 2: // IS_STATS_COMPLIANT - return IS_STATS_COMPLIANT; default: return null; } @@ -112,9 +107,6 @@ public String getFieldName() { } // isset id assignments - private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -123,8 +115,6 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))))); - tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap); } @@ -143,7 +133,6 @@ public PartitionsStatsResult( * Performs a deep copy on other. 
*/ public PartitionsStatsResult(PartitionsStatsResult other) { - __isset_bitfield = other.__isset_bitfield; if (other.isSetPartStats()) { Map> __this__partStats = new HashMap>(other.partStats.size()); for (Map.Entry> other_element : other.partStats.entrySet()) { @@ -162,7 +151,6 @@ public PartitionsStatsResult(PartitionsStatsResult other) { } this.partStats = __this__partStats; } - this.isStatsCompliant = other.isStatsCompliant; } public PartitionsStatsResult deepCopy() { @@ -172,8 +160,6 @@ public PartitionsStatsResult deepCopy() { @Override public void clear() { this.partStats = null; - setIsStatsCompliantIsSet(false); - this.isStatsCompliant = false; } public int getPartStatsSize() { @@ -210,28 +196,6 @@ public void setPartStatsIsSet(boolean value) { } } - public boolean isIsStatsCompliant() { - return this.isStatsCompliant; - } - - public void setIsStatsCompliant(boolean isStatsCompliant) { - this.isStatsCompliant = isStatsCompliant; - setIsStatsCompliantIsSet(true); - } - - public void unsetIsStatsCompliant() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ - public boolean isSetIsStatsCompliant() { - return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - public void setIsStatsCompliantIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case PART_STATS: @@ -242,14 +206,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case IS_STATS_COMPLIANT: - if (value == null) { - unsetIsStatsCompliant(); - } else { - setIsStatsCompliant((Boolean)value); - } - break; - } } @@ -258,9 +214,6 @@ public Object getFieldValue(_Fields field) { case PART_STATS: return getPartStats(); - case IS_STATS_COMPLIANT: - return 
isIsStatsCompliant(); - } throw new IllegalStateException(); } @@ -274,8 +227,6 @@ public boolean isSet(_Fields field) { switch (field) { case PART_STATS: return isSetPartStats(); - case IS_STATS_COMPLIANT: - return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -302,15 +253,6 @@ public boolean equals(PartitionsStatsResult that) { return false; } - boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); - boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); - if (this_present_isStatsCompliant || that_present_isStatsCompliant) { - if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) - return false; - if (this.isStatsCompliant != that.isStatsCompliant) - return false; - } - return true; } @@ -323,11 +265,6 @@ public int hashCode() { if (present_partStats) list.add(partStats); - boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); - list.add(present_isStatsCompliant); - if (present_isStatsCompliant) - list.add(isStatsCompliant); - return list.hashCode(); } @@ -349,16 +286,6 @@ public int compareTo(PartitionsStatsResult other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIsStatsCompliant()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -386,12 +313,6 @@ public String toString() { sb.append(this.partStats); } first = false; - if (isSetIsStatsCompliant()) { - if (!first) sb.append(", "); - sb.append("isStatsCompliant:"); - sb.append(this.isStatsCompliant); - first = false; - } sb.append(")"); return sb.toString(); } @@ -415,8 +336,6 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws 
java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -444,26 +363,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map440 = iprot.readMapBegin(); - struct.partStats = new HashMap>(2*_map440.size); - String _key441; - List _val442; - for (int _i443 = 0; _i443 < _map440.size; ++_i443) + org.apache.thrift.protocol.TMap _map450 = iprot.readMapBegin(); + struct.partStats = new HashMap>(2*_map450.size); + String _key451; + List _val452; + for (int _i453 = 0; _i453 < _map450.size; ++_i453) { - _key441 = iprot.readString(); + _key451 = iprot.readString(); { - org.apache.thrift.protocol.TList _list444 = iprot.readListBegin(); - _val442 = new ArrayList(_list444.size); - ColumnStatisticsObj _elem445; - for (int _i446 = 0; _i446 < _list444.size; ++_i446) + org.apache.thrift.protocol.TList _list454 = iprot.readListBegin(); + _val452 = new ArrayList(_list454.size); + ColumnStatisticsObj _elem455; + for (int _i456 = 0; _i456 < _list454.size; ++_i456) { - _elem445 = new ColumnStatisticsObj(); - _elem445.read(iprot); - _val442.add(_elem445); + _elem455 = new ColumnStatisticsObj(); + _elem455.read(iprot); + _val452.add(_elem455); } iprot.readListEnd(); } - struct.partStats.put(_key441, _val442); + struct.partStats.put(_key451, _val452); } iprot.readMapEnd(); } @@ -472,14 +391,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // IS_STATS_COMPLIANT - if 
(schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -497,14 +408,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry> _iter447 : struct.partStats.entrySet()) + for (Map.Entry> _iter457 : struct.partStats.entrySet()) { - oprot.writeString(_iter447.getKey()); + oprot.writeString(_iter457.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter447.getValue().size())); - for (ColumnStatisticsObj _iter448 : _iter447.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter457.getValue().size())); + for (ColumnStatisticsObj _iter458 : _iter457.getValue()) { - _iter448.write(oprot); + _iter458.write(oprot); } oprot.writeListEnd(); } @@ -513,11 +424,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes } oprot.writeFieldEnd(); } - if (struct.isSetIsStatsCompliant()) { - oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); - oprot.writeBool(struct.isStatsCompliant); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -537,59 +443,46 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry> _iter449 : struct.partStats.entrySet()) + for (Map.Entry> _iter459 : struct.partStats.entrySet()) { - oprot.writeString(_iter449.getKey()); 
+ oprot.writeString(_iter459.getKey()); { - oprot.writeI32(_iter449.getValue().size()); - for (ColumnStatisticsObj _iter450 : _iter449.getValue()) + oprot.writeI32(_iter459.getValue().size()); + for (ColumnStatisticsObj _iter460 : _iter459.getValue()) { - _iter450.write(oprot); + _iter460.write(oprot); } } } } - BitSet optionals = new BitSet(); - if (struct.isSetIsStatsCompliant()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetIsStatsCompliant()) { - oprot.writeBool(struct.isStatsCompliant); - } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map451 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap>(2*_map451.size); - String _key452; - List _val453; - for (int _i454 = 0; _i454 < _map451.size; ++_i454) + org.apache.thrift.protocol.TMap _map461 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new HashMap>(2*_map461.size); + String _key462; + List _val463; + for (int _i464 = 0; _i464 < _map461.size; ++_i464) { - _key452 = iprot.readString(); + _key462 = iprot.readString(); { - org.apache.thrift.protocol.TList _list455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val453 = new ArrayList(_list455.size); - ColumnStatisticsObj _elem456; - for (int _i457 = 0; _i457 < _list455.size; ++_i457) + org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val463 = new ArrayList(_list465.size); + ColumnStatisticsObj _elem466; + for (int _i467 = 0; _i467 < _list465.size; ++_i467) { - _elem456 = new 
ColumnStatisticsObj(); - _elem456.read(iprot); - _val453.add(_elem456); + _elem466 = new ColumnStatisticsObj(); + _elem466.read(iprot); + _val463.add(_elem466); } } - struct.partStats.put(_key452, _val453); + struct.partStats.put(_key462, _val463); } } struct.setPartStatsIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java index db265ea4f4..4583c1ea2a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysResponse case 1: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list328 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list328.size); - SQLPrimaryKey _elem329; - for (int _i330 = 0; _i330 < _list328.size; ++_i330) + org.apache.thrift.protocol.TList _list338 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list338.size); + SQLPrimaryKey _elem339; + for (int _i340 = 0; _i340 < _list338.size; ++_i340) { - _elem329 = new SQLPrimaryKey(); - _elem329.read(iprot); - struct.primaryKeys.add(_elem329); + _elem339 = new SQLPrimaryKey(); + _elem339.read(iprot); + struct.primaryKeys.add(_elem339); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRespons oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter331 : struct.primaryKeys) + for (SQLPrimaryKey _iter341 : struct.primaryKeys) { - _iter331.write(oprot); + _iter341.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter332 : struct.primaryKeys) + for (SQLPrimaryKey _iter342 : struct.primaryKeys) { - _iter332.write(oprot); + _iter342.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list333.size); - SQLPrimaryKey _elem334; - for (int _i335 = 0; _i335 < _list333.size; ++_i335) + org.apache.thrift.protocol.TList _list343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list343.size); + SQLPrimaryKey _elem344; + for (int _i345 = 0; _i345 < _list343.size; ++_i345) { - _elem334 = new SQLPrimaryKey(); - _elem334.read(iprot); - struct.primaryKeys.add(_elem334); + _elem344 = new SQLPrimaryKey(); + _elem344.read(iprot); + struct.primaryKeys.add(_elem344); } } struct.setPrimaryKeysIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 
e19034c630..deff8597aa 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list816 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list816.size); - long _elem817; - for (int _i818 = 0; _i818 < _list816.size; ++_i818) + org.apache.thrift.protocol.TList _list826 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list826.size); + long _elem827; + for (int _i828 = 0; _i828 < _list826.size; ++_i828) { - _elem817 = iprot.readI64(); - struct.fileIds.add(_elem817); + _elem827 = iprot.readI64(); + struct.fileIds.add(_elem827); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list819 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list819.size); - ByteBuffer _elem820; - for (int _i821 = 0; _i821 < _list819.size; ++_i821) + org.apache.thrift.protocol.TList _list829 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list829.size); + ByteBuffer _elem830; + for (int _i831 = 0; _i831 < _list829.size; ++_i831) { - _elem820 = iprot.readBinary(); - struct.metadata.add(_elem820); + _elem830 = iprot.readBinary(); + struct.metadata.add(_elem830); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter822 : struct.fileIds) + for (long _iter832 : struct.fileIds) { - oprot.writeI64(_iter822); + oprot.writeI64(_iter832); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter823 : struct.metadata) + for (ByteBuffer _iter833 : struct.metadata) { - oprot.writeBinary(_iter823); + oprot.writeBinary(_iter833); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter824 : struct.fileIds) + for (long _iter834 : struct.fileIds) { - oprot.writeI64(_iter824); + oprot.writeI64(_iter834); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter825 : struct.metadata) + for (ByteBuffer _iter835 : struct.metadata) { - oprot.writeBinary(_iter825); + oprot.writeBinary(_iter835); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list826 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list826.size); - long _elem827; - for (int _i828 = 0; _i828 < _list826.size; ++_i828) + org.apache.thrift.protocol.TList _list836 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new 
ArrayList(_list836.size); + long _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem827 = iprot.readI64(); - struct.fileIds.add(_elem827); + _elem837 = iprot.readI64(); + struct.fileIds.add(_elem837); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list829 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list829.size); - ByteBuffer _elem830; - for (int _i831 = 0; _i831 < _list829.size; ++_i831) + org.apache.thrift.protocol.TList _list839 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list839.size); + ByteBuffer _elem840; + for (int _i841 = 0; _i841 < _list839.size; ++_i841) { - _elem830 = iprot.readBinary(); - struct.metadata.add(_elem830); + _elem840 = iprot.readBinary(); + struct.metadata.add(_elem840); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index 3540e99336..5b3853a038 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list960.size); - String _elem961; - for (int _i962 = 0; _i962 < _list960.size; ++_i962) + org.apache.thrift.protocol.TList _list970 = iprot.readListBegin(); + 
struct.partVals = new ArrayList(_list970.size); + String _elem971; + for (int _i972 = 0; _i972 < _list970.size; ++_i972) { - _elem961 = iprot.readString(); - struct.partVals.add(_elem961); + _elem971 = iprot.readString(); + struct.partVals.add(_elem971); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter963 : struct.partVals) + for (String _iter973 : struct.partVals) { - oprot.writeString(_iter963); + oprot.writeString(_iter973); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter964 : struct.partVals) + for (String _iter974 : struct.partVals) { - oprot.writeString(_iter964); + oprot.writeString(_iter974); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list965.size); - String _elem966; - for (int _i967 = 0; _i967 < _list965.size; ++_i967) + org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partVals = new ArrayList(_list975.size); + String _elem976; + for (int _i977 = 0; _i977 < _list975.size; ++_i977) { - _elem966 = iprot.readString(); - struct.partVals.add(_elem966); + _elem976 = iprot.readString(); + struct.partVals.add(_elem976); } } struct.setPartValsIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java index f637d519f4..12a397be02 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ReplTblWriteIdState case 6: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list610 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list610.size); - String _elem611; - for (int _i612 = 0; _i612 < _list610.size; ++_i612) + org.apache.thrift.protocol.TList _list620 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list620.size); + String _elem621; + for (int _i622 = 0; _i622 < _list620.size; ++_i622) { - _elem611 = iprot.readString(); - struct.partNames.add(_elem611); + _elem621 = iprot.readString(); + struct.partNames.add(_elem621); } iprot.readListEnd(); } @@ -871,9 +871,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ReplTblWriteIdStat oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter613 : struct.partNames) + for (String _iter623 : struct.partNames) { - oprot.writeString(_iter613); + oprot.writeString(_iter623); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter614 : struct.partNames) + for 
(String _iter624 : struct.partNames) { - oprot.writeString(_iter614); + oprot.writeString(_iter624); } } } @@ -934,13 +934,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list615.size); - String _elem616; - for (int _i617 = 0; _i617 < _list615.size; ++_i617) + org.apache.thrift.protocol.TList _list625 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list625.size); + String _elem626; + for (int _i627 = 0; _i627 < _list625.size; ++_i627) { - _elem616 = iprot.readString(); - struct.partNames.add(_elem616); + _elem626 = iprot.readString(); + struct.partNames.add(_elem626); } } struct.setPartNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index 19de923dcb..2ad779dc40 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -168,13 +168,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list506 = iprot.readListBegin(); - names = new ArrayList(_list506.size); - String _elem507; - for (int _i508 = 0; _i508 < _list506.size; ++_i508) + org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); + names = new ArrayList(_list516.size); + String 
_elem517; + for (int _i518 = 0; _i518 < _list516.size; ++_i518) { - _elem507 = iprot.readString(); - names.add(_elem507); + _elem517 = iprot.readString(); + names.add(_elem517); } iprot.readListEnd(); } @@ -187,14 +187,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list509 = iprot.readListBegin(); - exprs = new ArrayList(_list509.size); - DropPartitionsExpr _elem510; - for (int _i511 = 0; _i511 < _list509.size; ++_i511) + org.apache.thrift.protocol.TList _list519 = iprot.readListBegin(); + exprs = new ArrayList(_list519.size); + DropPartitionsExpr _elem520; + for (int _i521 = 0; _i521 < _list519.size; ++_i521) { - _elem510 = new DropPartitionsExpr(); - _elem510.read(iprot); - exprs.add(_elem510); + _elem520 = new DropPartitionsExpr(); + _elem520.read(iprot); + exprs.add(_elem520); } iprot.readListEnd(); } @@ -219,9 +219,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter512 : names) + for (String _iter522 : names) { - oprot.writeString(_iter512); + oprot.writeString(_iter522); } oprot.writeListEnd(); } @@ -230,9 +230,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter513 : exprs) + for (DropPartitionsExpr _iter523 : exprs) { - _iter513.write(oprot); + _iter523.write(oprot); } oprot.writeListEnd(); } @@ -250,13 +250,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list514 = iprot.readListBegin(); - names = new ArrayList(_list514.size); - 
String _elem515; - for (int _i516 = 0; _i516 < _list514.size; ++_i516) + org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); + names = new ArrayList(_list524.size); + String _elem525; + for (int _i526 = 0; _i526 < _list524.size; ++_i526) { - _elem515 = iprot.readString(); - names.add(_elem515); + _elem525 = iprot.readString(); + names.add(_elem525); } iprot.readListEnd(); } @@ -264,14 +264,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list517 = iprot.readListBegin(); - exprs = new ArrayList(_list517.size); - DropPartitionsExpr _elem518; - for (int _i519 = 0; _i519 < _list517.size; ++_i519) + org.apache.thrift.protocol.TList _list527 = iprot.readListBegin(); + exprs = new ArrayList(_list527.size); + DropPartitionsExpr _elem528; + for (int _i529 = 0; _i529 < _list527.size; ++_i529) { - _elem518 = new DropPartitionsExpr(); - _elem518.read(iprot); - exprs.add(_elem518); + _elem528 = new DropPartitionsExpr(); + _elem528.read(iprot); + exprs.add(_elem528); } iprot.readListEnd(); } @@ -291,9 +291,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter520 : names) + for (String _iter530 : names) { - oprot.writeString(_iter520); + oprot.writeString(_iter530); } oprot.writeListEnd(); } @@ -302,9 +302,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter521 : exprs) + for (DropPartitionsExpr _iter531 : exprs) { - _iter521.write(oprot); + _iter531.write(oprot); } oprot.writeListEnd(); } diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java index b4cd16f8cf..cf9ae7c24b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java @@ -445,14 +445,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 1: // FIELD_SCHEMAS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list300 = iprot.readListBegin(); - struct.fieldSchemas = new ArrayList(_list300.size); - FieldSchema _elem301; - for (int _i302 = 0; _i302 < _list300.size; ++_i302) + org.apache.thrift.protocol.TList _list310 = iprot.readListBegin(); + struct.fieldSchemas = new ArrayList(_list310.size); + FieldSchema _elem311; + for (int _i312 = 0; _i312 < _list310.size; ++_i312) { - _elem301 = new FieldSchema(); - _elem301.read(iprot); - struct.fieldSchemas.add(_elem301); + _elem311 = new FieldSchema(); + _elem311.read(iprot); + struct.fieldSchemas.add(_elem311); } iprot.readListEnd(); } @@ -464,15 +464,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 2: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map303 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map303.size); - String _key304; - String _val305; - for (int _i306 = 0; _i306 < _map303.size; ++_i306) + org.apache.thrift.protocol.TMap _map313 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map313.size); + String _key314; + String _val315; + for (int _i316 = 0; _i316 < _map313.size; ++_i316) { - _key304 = iprot.readString(); - _val305 = iprot.readString(); - 
struct.properties.put(_key304, _val305); + _key314 = iprot.readString(); + _val315 = iprot.readString(); + struct.properties.put(_key314, _val315); } iprot.readMapEnd(); } @@ -498,9 +498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fieldSchemas.size())); - for (FieldSchema _iter307 : struct.fieldSchemas) + for (FieldSchema _iter317 : struct.fieldSchemas) { - _iter307.write(oprot); + _iter317.write(oprot); } oprot.writeListEnd(); } @@ -510,10 +510,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter308 : struct.properties.entrySet()) + for (Map.Entry _iter318 : struct.properties.entrySet()) { - oprot.writeString(_iter308.getKey()); - oprot.writeString(_iter308.getValue()); + oprot.writeString(_iter318.getKey()); + oprot.writeString(_iter318.getValue()); } oprot.writeMapEnd(); } @@ -547,19 +547,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Schema struct) thro if (struct.isSetFieldSchemas()) { { oprot.writeI32(struct.fieldSchemas.size()); - for (FieldSchema _iter309 : struct.fieldSchemas) + for (FieldSchema _iter319 : struct.fieldSchemas) { - _iter309.write(oprot); + _iter319.write(oprot); } } } if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter310 : struct.properties.entrySet()) + for (Map.Entry _iter320 : struct.properties.entrySet()) { - oprot.writeString(_iter310.getKey()); - oprot.writeString(_iter310.getValue()); + oprot.writeString(_iter320.getKey()); + oprot.writeString(_iter320.getValue()); } } } @@ -571,29 +571,29 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, Schema struct) throw BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.fieldSchemas = new ArrayList(_list311.size); - FieldSchema _elem312; - for (int _i313 = 0; _i313 < _list311.size; ++_i313) + org.apache.thrift.protocol.TList _list321 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fieldSchemas = new ArrayList(_list321.size); + FieldSchema _elem322; + for (int _i323 = 0; _i323 < _list321.size; ++_i323) { - _elem312 = new FieldSchema(); - _elem312.read(iprot); - struct.fieldSchemas.add(_elem312); + _elem322 = new FieldSchema(); + _elem322.read(iprot); + struct.fieldSchemas.add(_elem322); } } struct.setFieldSchemasIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map314 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map314.size); - String _key315; - String _val316; - for (int _i317 = 0; _i317 < _map314.size; ++_i317) + org.apache.thrift.protocol.TMap _map324 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map324.size); + String _key325; + String _val326; + for (int _i327 = 0; _i327 < _map324.size; ++_i327) { - _key315 = iprot.readString(); - _val316 = iprot.readString(); - struct.properties.put(_key315, _val316); + _key325 = iprot.readString(); + _val326 = iprot.readString(); + struct.properties.put(_key325, _val326); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index 88d7e3fedf..703ff70486 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - struct.cols = new ArrayList(_list936.size); - FieldSchema _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list946 = iprot.readListBegin(); + struct.cols = new ArrayList(_list946.size); + FieldSchema _elem947; + for (int _i948 = 0; _i948 < _list946.size; ++_i948) { - _elem937 = new FieldSchema(); - _elem937.read(iprot); - struct.cols.add(_elem937); + _elem947 = new FieldSchema(); + _elem947.read(iprot); + struct.cols.add(_elem947); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter939 : struct.cols) + for (FieldSchema _iter949 : struct.cols) { - _iter939.write(oprot); + _iter949.write(oprot); } oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter940 : struct.cols) + for (FieldSchema _iter950 : struct.cols) { - _iter940.write(oprot); + _iter950.write(oprot); } } } @@ -1368,14 +1368,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list941.size); - FieldSchema _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list951 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list951.size); + FieldSchema _elem952; + for (int _i953 = 0; _i953 < _list951.size; ++_i953) { - _elem942 = new FieldSchema(); - _elem942.read(iprot); - struct.cols.add(_elem942); + _elem952 = new FieldSchema(); + _elem952.read(iprot); + struct.cols.add(_elem952); } } struct.setColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsRequest.java new file mode 100644 index 0000000000..739e4d9c13 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsRequest.java @@ -0,0 +1,862 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import 
java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SetBasicStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetBasicStatsRequest"); + + private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField IS_VALID_FIELD_DESC = new org.apache.thrift.protocol.TField("isValid", org.apache.thrift.protocol.TType.BOOL, (short)2); + private static final org.apache.thrift.protocol.TField LEGACY_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("legacyStats", org.apache.thrift.protocol.TType.MAP, (short)3); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new SetBasicStatsRequestStandardSchemeFactory()); + 
schemes.put(TupleScheme.class, new SetBasicStatsRequestTupleSchemeFactory()); + } + + private ColumnStatisticsDesc desc; // required + private boolean isValid; // required + private Map legacyStats; // optional + private long writeId; // optional + private String validWriteIdList; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DESC((short)1, "desc"), + IS_VALID((short)2, "isValid"), + LEGACY_STATS((short)3, "legacyStats"), + WRITE_ID((short)4, "writeId"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DESC + return DESC; + case 2: // IS_VALID + return IS_VALID; + case 3: // LEGACY_STATS + return LEGACY_STATS; + case 4: // WRITE_ID + return WRITE_ID; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ISVALID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.LEGACY_STATS,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DESC, new org.apache.thrift.meta_data.FieldMetaData("desc", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsDesc.class))); + tmpMap.put(_Fields.IS_VALID, new org.apache.thrift.meta_data.FieldMetaData("isValid", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.LEGACY_STATS, new org.apache.thrift.meta_data.FieldMetaData("legacyStats", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetBasicStatsRequest.class, metaDataMap); + } + + public SetBasicStatsRequest() { + this.writeId = -1L; + + } + + public SetBasicStatsRequest( + ColumnStatisticsDesc desc, + boolean isValid) + { + this(); + this.desc = desc; + this.isValid = isValid; + setIsValidIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public SetBasicStatsRequest(SetBasicStatsRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDesc()) { + this.desc = new ColumnStatisticsDesc(other.desc); + } + this.isValid = other.isValid; + if (other.isSetLegacyStats()) { + Map __this__legacyStats = new HashMap(other.legacyStats); + this.legacyStats = __this__legacyStats; + } + this.writeId = other.writeId; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } + } + + public SetBasicStatsRequest deepCopy() { + return new SetBasicStatsRequest(this); + } + + @Override + public void clear() { + this.desc = null; + setIsValidIsSet(false); + this.isValid = false; + this.legacyStats = null; + this.writeId = -1L; + + this.validWriteIdList = null; + } + + public ColumnStatisticsDesc getDesc() { + return this.desc; + } + + public void setDesc(ColumnStatisticsDesc desc) { + this.desc = desc; + } + + public void unsetDesc() { + this.desc = null; + } + + /** Returns true if field desc is set (has been assigned a value) and false otherwise */ + public boolean isSetDesc() { + return this.desc != null; + } + + public void setDescIsSet(boolean value) { + if (!value) { + this.desc = null; 
+ } + } + + public boolean isIsValid() { + return this.isValid; + } + + public void setIsValid(boolean isValid) { + this.isValid = isValid; + setIsValidIsSet(true); + } + + public void unsetIsValid() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISVALID_ISSET_ID); + } + + /** Returns true if field isValid is set (has been assigned a value) and false otherwise */ + public boolean isSetIsValid() { + return EncodingUtils.testBit(__isset_bitfield, __ISVALID_ISSET_ID); + } + + public void setIsValidIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISVALID_ISSET_ID, value); + } + + public int getLegacyStatsSize() { + return (this.legacyStats == null) ? 0 : this.legacyStats.size(); + } + + public void putToLegacyStats(String key, String val) { + if (this.legacyStats == null) { + this.legacyStats = new HashMap(); + } + this.legacyStats.put(key, val); + } + + public Map getLegacyStats() { + return this.legacyStats; + } + + public void setLegacyStats(Map legacyStats) { + this.legacyStats = legacyStats; + } + + public void unsetLegacyStats() { + this.legacyStats = null; + } + + /** Returns true if field legacyStats is set (has been assigned a value) and false otherwise */ + public boolean isSetLegacyStats() { + return this.legacyStats != null; + } + + public void setLegacyStatsIsSet(boolean value) { + if (!value) { + this.legacyStats = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = 
EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DESC: + if (value == null) { + unsetDesc(); + } else { + setDesc((ColumnStatisticsDesc)value); + } + break; + + case IS_VALID: + if (value == null) { + unsetIsValid(); + } else { + setIsValid((Boolean)value); + } + break; + + case LEGACY_STATS: + if (value == null) { + unsetLegacyStats(); + } else { + setLegacyStats((Map)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DESC: + return getDesc(); + + case IS_VALID: + return isIsValid(); + + case LEGACY_STATS: + return getLegacyStats(); + + case WRITE_ID: + return getWriteId(); + + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DESC: + return isSetDesc(); + case IS_VALID: + return isSetIsValid(); 
+ case LEGACY_STATS: + return isSetLegacyStats(); + case WRITE_ID: + return isSetWriteId(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SetBasicStatsRequest) + return this.equals((SetBasicStatsRequest)that); + return false; + } + + public boolean equals(SetBasicStatsRequest that) { + if (that == null) + return false; + + boolean this_present_desc = true && this.isSetDesc(); + boolean that_present_desc = true && that.isSetDesc(); + if (this_present_desc || that_present_desc) { + if (!(this_present_desc && that_present_desc)) + return false; + if (!this.desc.equals(that.desc)) + return false; + } + + boolean this_present_isValid = true; + boolean that_present_isValid = true; + if (this_present_isValid || that_present_isValid) { + if (!(this_present_isValid && that_present_isValid)) + return false; + if (this.isValid != that.isValid) + return false; + } + + boolean this_present_legacyStats = true && this.isSetLegacyStats(); + boolean that_present_legacyStats = true && that.isSetLegacyStats(); + if (this_present_legacyStats || that_present_legacyStats) { + if (!(this_present_legacyStats && that_present_legacyStats)) + return false; + if (!this.legacyStats.equals(that.legacyStats)) + return false; + } + + boolean this_present_writeId = true && this.isSetWriteId(); + boolean that_present_writeId = true && that.isSetWriteId(); + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && 
that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_desc = true && (isSetDesc()); + list.add(present_desc); + if (present_desc) + list.add(desc); + + boolean present_isValid = true; + list.add(present_isValid); + if (present_isValid) + list.add(isValid); + + boolean present_legacyStats = true && (isSetLegacyStats()); + list.add(present_legacyStats); + if (present_legacyStats) + list.add(legacyStats); + + boolean present_writeId = true && (isSetWriteId()); + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + + return list.hashCode(); + } + + @Override + public int compareTo(SetBasicStatsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDesc()).compareTo(other.isSetDesc()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDesc()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.desc, other.desc); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsValid()).compareTo(other.isSetIsValid()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsValid()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isValid, other.isValid); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetLegacyStats()).compareTo(other.isSetLegacyStats()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLegacyStats()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.legacyStats, other.legacyStats); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("SetBasicStatsRequest("); + boolean first = true; + + sb.append("desc:"); + if (this.desc == null) { + sb.append("null"); + } else { + sb.append(this.desc); + } + first = false; + if (!first) sb.append(", "); + sb.append("isValid:"); + sb.append(this.isValid); + first = false; + if (isSetLegacyStats()) { + if (!first) sb.append(", "); + sb.append("legacyStats:"); + if (this.legacyStats == null) { + sb.append("null"); + } else { + sb.append(this.legacyStats); + } + first = false; + } + if (isSetWriteId()) { + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + } + if (isSetValidWriteIdList()) 
{ + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDesc()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'desc' is unset! Struct:" + toString()); + } + + if (!isSetIsValid()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'isValid' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (desc != null) { + desc.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SetBasicStatsRequestStandardSchemeFactory implements SchemeFactory { + public SetBasicStatsRequestStandardScheme getScheme() { + return new SetBasicStatsRequestStandardScheme(); + } + } + + private static class SetBasicStatsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SetBasicStatsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DESC + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.desc = new ColumnStatisticsDesc(); + struct.desc.read(iprot); + struct.setDescIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // IS_VALID + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isValid = iprot.readBool(); + struct.setIsValidIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // LEGACY_STATS + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map300 = iprot.readMapBegin(); + struct.legacyStats = new HashMap(2*_map300.size); + String _key301; + String _val302; + for (int _i303 = 0; _i303 < _map300.size; ++_i303) + { + _key301 = iprot.readString(); + _val302 = iprot.readString(); + struct.legacyStats.put(_key301, _val302); + } + iprot.readMapEnd(); + } + struct.setLegacyStatsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; + case 4: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, SetBasicStatsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.desc != null) { + oprot.writeFieldBegin(DESC_FIELD_DESC); + struct.desc.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(IS_VALID_FIELD_DESC); + oprot.writeBool(struct.isValid); + oprot.writeFieldEnd(); + if (struct.legacyStats != null) { + if (struct.isSetLegacyStats()) { + oprot.writeFieldBegin(LEGACY_STATS_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.legacyStats.size())); + for (Map.Entry _iter304 : struct.legacyStats.entrySet()) + { + oprot.writeString(_iter304.getKey()); + oprot.writeString(_iter304.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.isSetWriteId()) { + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + 
oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class SetBasicStatsRequestTupleSchemeFactory implements SchemeFactory { + public SetBasicStatsRequestTupleScheme getScheme() { + return new SetBasicStatsRequestTupleScheme(); + } + } + + private static class SetBasicStatsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, SetBasicStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.desc.write(oprot); + oprot.writeBool(struct.isValid); + BitSet optionals = new BitSet(); + if (struct.isSetLegacyStats()) { + optionals.set(0); + } + if (struct.isSetWriteId()) { + optionals.set(1); + } + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetLegacyStats()) { + { + oprot.writeI32(struct.legacyStats.size()); + for (Map.Entry _iter305 : struct.legacyStats.entrySet()) + { + oprot.writeString(_iter305.getKey()); + oprot.writeString(_iter305.getValue()); + } + } + } + if (struct.isSetWriteId()) { + oprot.writeI64(struct.writeId); + } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, SetBasicStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.desc = new ColumnStatisticsDesc(); + struct.desc.read(iprot); + struct.setDescIsSet(true); + struct.isValid = iprot.readBool(); + struct.setIsValidIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TMap _map306 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.legacyStats = new HashMap(2*_map306.size); + String _key307; + String _val308; + for (int _i309 = 
0; _i309 < _map306.size; ++_i309) + { + _key307 = iprot.readString(); + _val308 = iprot.readString(); + struct.legacyStats.put(_key307, _val308); + } + } + struct.setLegacyStatsIsSet(true); + } + if (incoming.get(1)) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsResponse.java new file mode 100644 index 0000000000..f98cb60450 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetBasicStatsResponse.java @@ -0,0 +1,387 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SetBasicStatsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetBasicStatsResponse"); + + private static final org.apache.thrift.protocol.TField RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("result", org.apache.thrift.protocol.TType.BOOL, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new SetBasicStatsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new SetBasicStatsResponseTupleSchemeFactory()); + } + + private boolean result; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RESULT((short)1, "result"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RESULT + return RESULT; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __RESULT_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RESULT, new org.apache.thrift.meta_data.FieldMetaData("result", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetBasicStatsResponse.class, metaDataMap); + } + + public SetBasicStatsResponse() { + } + + public SetBasicStatsResponse( + boolean result) + { + this(); + this.result = result; + setResultIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public SetBasicStatsResponse(SetBasicStatsResponse other) { + __isset_bitfield = other.__isset_bitfield; + this.result = other.result; + } + + public SetBasicStatsResponse deepCopy() { + return new SetBasicStatsResponse(this); + } + + @Override + public void clear() { + setResultIsSet(false); + this.result = false; + } + + public boolean isResult() { + return this.result; + } + + public void setResult(boolean result) { + this.result = result; + setResultIsSet(true); + } + + public void unsetResult() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RESULT_ISSET_ID); + } + + /** Returns true if field result is set (has been assigned a value) and false otherwise */ + public boolean isSetResult() { + return EncodingUtils.testBit(__isset_bitfield, __RESULT_ISSET_ID); + } + + public void setResultIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RESULT_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RESULT: + if (value == null) { + unsetResult(); + } else { + setResult((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RESULT: + return isResult(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RESULT: + return isSetResult(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SetBasicStatsResponse) + return this.equals((SetBasicStatsResponse)that); + return false; + } + + public boolean equals(SetBasicStatsResponse that) { + if (that == null) + return false; + + boolean this_present_result = true; + boolean that_present_result = true; + if 
(this_present_result || that_present_result) { + if (!(this_present_result && that_present_result)) + return false; + if (this.result != that.result) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_result = true; + list.add(present_result); + if (present_result) + list.add(result); + + return list.hashCode(); + } + + @Override + public int compareTo(SetBasicStatsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetResult()).compareTo(other.isSetResult()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetResult()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.result, other.result); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("SetBasicStatsResponse("); + boolean first = true; + + sb.append("result:"); + sb.append(this.result); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetResult()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'result' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SetBasicStatsResponseStandardSchemeFactory implements SchemeFactory { + public SetBasicStatsResponseStandardScheme getScheme() { + return new SetBasicStatsResponseStandardScheme(); + } + } + + private static class SetBasicStatsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SetBasicStatsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RESULT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.result = iprot.readBool(); + struct.setResultIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, SetBasicStatsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(RESULT_FIELD_DESC); + oprot.writeBool(struct.result); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class SetBasicStatsResponseTupleSchemeFactory implements SchemeFactory { + public SetBasicStatsResponseTupleScheme getScheme() { + return new SetBasicStatsResponseTupleScheme(); + } + } + + private static class SetBasicStatsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, SetBasicStatsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBool(struct.result); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, SetBasicStatsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.result = iprot.readBool(); + struct.setResultIsSet(true); + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index c58885f22a..49f22bd01d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list708 = iprot.readListBegin(); - struct.compacts = new 
ArrayList(_list708.size); - ShowCompactResponseElement _elem709; - for (int _i710 = 0; _i710 < _list708.size; ++_i710) + org.apache.thrift.protocol.TList _list718 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list718.size); + ShowCompactResponseElement _elem719; + for (int _i720 = 0; _i720 < _list718.size; ++_i720) { - _elem709 = new ShowCompactResponseElement(); - _elem709.read(iprot); - struct.compacts.add(_elem709); + _elem719 = new ShowCompactResponseElement(); + _elem719.read(iprot); + struct.compacts.add(_elem719); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter711 : struct.compacts) + for (ShowCompactResponseElement _iter721 : struct.compacts) { - _iter711.write(oprot); + _iter721.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter712 : struct.compacts) + for (ShowCompactResponseElement _iter722 : struct.compacts) { - _iter712.write(oprot); + _iter722.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list713.size); - ShowCompactResponseElement _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + 
org.apache.thrift.protocol.TList _list723 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list723.size); + ShowCompactResponseElement _elem724; + for (int _i725 = 0; _i725 < _list723.size; ++_i725) { - _elem714 = new ShowCompactResponseElement(); - _elem714.read(iprot); - struct.compacts.add(_elem714); + _elem724 = new ShowCompactResponseElement(); + _elem724.read(iprot); + struct.compacts.add(_elem724); } } struct.setCompactsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 4bbc8e7766..f54f197606 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); - struct.locks = new ArrayList(_list674.size); - ShowLocksResponseElement _elem675; - for (int _i676 = 0; _i676 < _list674.size; ++_i676) + org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); + struct.locks = new ArrayList(_list684.size); + ShowLocksResponseElement _elem685; + for (int _i686 = 0; _i686 < _list684.size; ++_i686) { - _elem675 = new ShowLocksResponseElement(); - _elem675.read(iprot); - struct.locks.add(_elem675); + _elem685 = new ShowLocksResponseElement(); + _elem685.read(iprot); + struct.locks.add(_elem685); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter677 : struct.locks) + for (ShowLocksResponseElement _iter687 : struct.locks) { - _iter677.write(oprot); + _iter687.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter678 : struct.locks) + for (ShowLocksResponseElement _iter688 : struct.locks) { - _iter678.write(oprot); + _iter688.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list679.size); - ShowLocksResponseElement _elem680; - for (int _i681 = 0; _i681 < _list679.size; ++_i681) + org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list689.size); + ShowLocksResponseElement _elem690; + for (int _i691 = 0; _i691 < _list689.size; ++_i691) { - _elem680 = new ShowLocksResponseElement(); - _elem680.read(iprot); - struct.locks.add(_elem680); + _elem690 = new ShowLocksResponseElement(); + _elem690.read(iprot); + struct.locks.add(_elem690); } } struct.setLocksIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index ae3a6e2483..543549b5a4 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -700,13 +700,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list458.size); - String _elem459; - for (int _i460 = 0; _i460 < _list458.size; ++_i460) + org.apache.thrift.protocol.TList _list468 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list468.size); + String _elem469; + for (int _i470 = 0; _i470 < _list468.size; ++_i470) { - _elem459 = iprot.readString(); - struct.colNames.add(_elem459); + _elem469 = iprot.readString(); + struct.colNames.add(_elem469); } iprot.readListEnd(); } @@ -758,9 +758,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter461 : struct.colNames) + for (String _iter471 : struct.colNames) { - oprot.writeString(_iter461); + oprot.writeString(_iter471); } oprot.writeListEnd(); } @@ -801,9 +801,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter462 : struct.colNames) + for (String _iter472 : struct.colNames) { - oprot.writeString(_iter462); + oprot.writeString(_iter472); } } BitSet optionals = new BitSet(); @@ -830,13 +830,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - 
org.apache.thrift.protocol.TList _list463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list463.size); - String _elem464; - for (int _i465 = 0; _i465 < _list463.size; ++_i465) + org.apache.thrift.protocol.TList _list473 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list473.size); + String _elem474; + for (int _i475 = 0; _i475 < _list473.size; ++_i475) { - _elem464 = iprot.readString(); - struct.colNames.add(_elem464); + _elem474 = iprot.readString(); + struct.colNames.add(_elem474); } } struct.setColNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index b57f4bf6b2..fad9981bf6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -39,7 +39,6 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult"); private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -48,12 +47,10 @@ } private List tableStats; // required - private boolean isStatsCompliant; // optional /** The set of fields 
this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_STATS((short)1, "tableStats"), - IS_STATS_COMPLIANT((short)2, "isStatsCompliant"); + TABLE_STATS((short)1, "tableStats"); private static final Map byName = new HashMap(); @@ -70,8 +67,6 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TABLE_STATS return TABLE_STATS; - case 2: // IS_STATS_COMPLIANT - return IS_STATS_COMPLIANT; default: return null; } @@ -112,17 +107,12 @@ public String getFieldName() { } // isset id assignments - private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))); - tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap); } @@ -141,7 +131,6 @@ public TableStatsResult( * Performs a deep copy on other. 
*/ public TableStatsResult(TableStatsResult other) { - __isset_bitfield = other.__isset_bitfield; if (other.isSetTableStats()) { List __this__tableStats = new ArrayList(other.tableStats.size()); for (ColumnStatisticsObj other_element : other.tableStats) { @@ -149,7 +138,6 @@ public TableStatsResult(TableStatsResult other) { } this.tableStats = __this__tableStats; } - this.isStatsCompliant = other.isStatsCompliant; } public TableStatsResult deepCopy() { @@ -159,8 +147,6 @@ public TableStatsResult deepCopy() { @Override public void clear() { this.tableStats = null; - setIsStatsCompliantIsSet(false); - this.isStatsCompliant = false; } public int getTableStatsSize() { @@ -201,28 +187,6 @@ public void setTableStatsIsSet(boolean value) { } } - public boolean isIsStatsCompliant() { - return this.isStatsCompliant; - } - - public void setIsStatsCompliant(boolean isStatsCompliant) { - this.isStatsCompliant = isStatsCompliant; - setIsStatsCompliantIsSet(true); - } - - public void unsetIsStatsCompliant() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */ - public boolean isSetIsStatsCompliant() { - return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID); - } - - public void setIsStatsCompliantIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_STATS: @@ -233,14 +197,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case IS_STATS_COMPLIANT: - if (value == null) { - unsetIsStatsCompliant(); - } else { - setIsStatsCompliant((Boolean)value); - } - break; - } } @@ -249,9 +205,6 @@ public Object getFieldValue(_Fields field) { case TABLE_STATS: return getTableStats(); - case IS_STATS_COMPLIANT: - return isIsStatsCompliant(); - } throw 
new IllegalStateException(); } @@ -265,8 +218,6 @@ public boolean isSet(_Fields field) { switch (field) { case TABLE_STATS: return isSetTableStats(); - case IS_STATS_COMPLIANT: - return isSetIsStatsCompliant(); } throw new IllegalStateException(); } @@ -293,15 +244,6 @@ public boolean equals(TableStatsResult that) { return false; } - boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant(); - boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant(); - if (this_present_isStatsCompliant || that_present_isStatsCompliant) { - if (!(this_present_isStatsCompliant && that_present_isStatsCompliant)) - return false; - if (this.isStatsCompliant != that.isStatsCompliant) - return false; - } - return true; } @@ -314,11 +256,6 @@ public int hashCode() { if (present_tableStats) list.add(tableStats); - boolean present_isStatsCompliant = true && (isSetIsStatsCompliant()); - list.add(present_isStatsCompliant); - if (present_isStatsCompliant) - list.add(isStatsCompliant); - return list.hashCode(); } @@ -340,16 +277,6 @@ public int compareTo(TableStatsResult other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIsStatsCompliant()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -377,12 +304,6 @@ public String toString() { sb.append(this.tableStats); } first = false; - if (isSetIsStatsCompliant()) { - if (!first) sb.append(", "); - sb.append("isStatsCompliant:"); - sb.append(this.isStatsCompliant); - first = false; - } sb.append(")"); return sb.toString(); } @@ -406,8 +327,6 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException 
{ try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -435,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list432 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list432.size); - ColumnStatisticsObj _elem433; - for (int _i434 = 0; _i434 < _list432.size; ++_i434) + org.apache.thrift.protocol.TList _list442 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list442.size); + ColumnStatisticsObj _elem443; + for (int _i444 = 0; _i444 < _list442.size; ++_i444) { - _elem433 = new ColumnStatisticsObj(); - _elem433.read(iprot); - struct.tableStats.add(_elem433); + _elem443 = new ColumnStatisticsObj(); + _elem443.read(iprot); + struct.tableStats.add(_elem443); } iprot.readListEnd(); } @@ -451,14 +370,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // IS_STATS_COMPLIANT - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -476,19 +387,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.tableStats.size())); - for (ColumnStatisticsObj _iter435 : struct.tableStats) + for (ColumnStatisticsObj _iter445 : struct.tableStats) { - _iter435.write(oprot); + _iter445.write(oprot); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } - if (struct.isSetIsStatsCompliant()) { - oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC); - oprot.writeBool(struct.isStatsCompliant); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -508,41 +414,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter436 : struct.tableStats) + for (ColumnStatisticsObj _iter446 : struct.tableStats) { - _iter436.write(oprot); + _iter446.write(oprot); } } - BitSet optionals = new BitSet(); - if (struct.isSetIsStatsCompliant()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetIsStatsCompliant()) { - oprot.writeBool(struct.isStatsCompliant); - } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list437 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list437.size); - ColumnStatisticsObj _elem438; - for (int _i439 = 0; _i439 < _list437.size; ++_i439) + org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list447.size); + ColumnStatisticsObj _elem448; + for (int _i449 = 0; _i449 < _list447.size; ++_i449) { - _elem438 = new ColumnStatisticsObj(); - _elem438.read(iprot); - struct.tableStats.add(_elem438); + _elem448 = new ColumnStatisticsObj(); + _elem448.read(iprot); + struct.tableStats.add(_elem448); } } 
struct.setTableStatsIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.isStatsCompliant = iprot.readBool(); - struct.setIsStatsCompliantIsSet(true); - } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java index 9c6ee51b26..375448ee21 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java @@ -708,13 +708,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableValidWriteIds case 3: // INVALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); - struct.invalidWriteIds = new ArrayList(_list626.size); - long _elem627; - for (int _i628 = 0; _i628 < _list626.size; ++_i628) + org.apache.thrift.protocol.TList _list636 = iprot.readListBegin(); + struct.invalidWriteIds = new ArrayList(_list636.size); + long _elem637; + for (int _i638 = 0; _i638 < _list636.size; ++_i638) { - _elem627 = iprot.readI64(); - struct.invalidWriteIds.add(_elem627); + _elem637 = iprot.readI64(); + struct.invalidWriteIds.add(_elem637); } iprot.readListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableValidWriteIds oprot.writeFieldBegin(INVALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.invalidWriteIds.size())); - for (long _iter629 : struct.invalidWriteIds) + for (long _iter639 : struct.invalidWriteIds) { - oprot.writeI64(_iter629); + oprot.writeI64(_iter639); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds oprot.writeI64(struct.writeIdHighWaterMark); { oprot.writeI32(struct.invalidWriteIds.size()); - for (long _iter630 : struct.invalidWriteIds) + for (long _iter640 : struct.invalidWriteIds) { - oprot.writeI64(_iter630); + oprot.writeI64(_iter640); } } oprot.writeBinary(struct.abortedBits); @@ -827,13 +827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds s struct.writeIdHighWaterMark = iprot.readI64(); struct.setWriteIdHighWaterMarkIsSet(true); { - org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.invalidWriteIds = new ArrayList(_list631.size); - long _elem632; - for (int _i633 = 0; _i633 < _list631.size; ++_i633) + org.apache.thrift.protocol.TList _list641 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.invalidWriteIds = new ArrayList(_list641.size); + long _elem642; + for (int _i643 = 0; _i643 < _list641.size; ++_i643) { - _elem632 = iprot.readI64(); - struct.invalidWriteIds.add(_elem632); + _elem642 = iprot.readI64(); + struct.invalidWriteIds.add(_elem642); } } struct.setInvalidWriteIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 7ab64eadac..a6f052f4e6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -254,6 +254,12 @@ public SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws 
NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + public SetBasicStatsResponse update_table_basic_statistics_req(SetBasicStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + + public SetBasicStatsResponse update_partition_basic_statistics_req(SetBasicStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + + public InvalidateColumnStatsResponse invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException; public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException; @@ -686,6 +692,12 @@ public void update_partition_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_table_basic_statistics_req(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void update_partition_basic_statistics_req(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void 
get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -4134,6 +4146,111 @@ public SetPartitionsStatsResponse recv_update_partition_column_statistics_req() throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result"); } + public SetBasicStatsResponse update_table_basic_statistics_req(SetBasicStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + send_update_table_basic_statistics_req(req); + return recv_update_table_basic_statistics_req(); + } + + public void send_update_table_basic_statistics_req(SetBasicStatsRequest req) throws org.apache.thrift.TException + { + update_table_basic_statistics_req_args args = new update_table_basic_statistics_req_args(); + args.setReq(req); + sendBase("update_table_basic_statistics_req", args); + } + + public SetBasicStatsResponse recv_update_table_basic_statistics_req() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + update_table_basic_statistics_req_result result = new update_table_basic_statistics_req_result(); + receiveBase(result, "update_table_basic_statistics_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, 
"update_table_basic_statistics_req failed: unknown result"); + } + + public SetBasicStatsResponse update_partition_basic_statistics_req(SetBasicStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + send_update_partition_basic_statistics_req(req); + return recv_update_partition_basic_statistics_req(); + } + + public void send_update_partition_basic_statistics_req(SetBasicStatsRequest req) throws org.apache.thrift.TException + { + update_partition_basic_statistics_req_args args = new update_partition_basic_statistics_req_args(); + args.setReq(req); + sendBase("update_partition_basic_statistics_req", args); + } + + public SetBasicStatsResponse recv_update_partition_basic_statistics_req() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + update_partition_basic_statistics_req_result result = new update_partition_basic_statistics_req_result(); + receiveBase(result, "update_partition_basic_statistics_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_basic_statistics_req failed: unknown result"); + } + + public InvalidateColumnStatsResponse invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + send_invalidate_all_column_statistics_req(req); + return recv_invalidate_all_column_statistics_req(); + } + + public void send_invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req) throws org.apache.thrift.TException + { + 
invalidate_all_column_statistics_req_args args = new invalidate_all_column_statistics_req_args(); + args.setReq(req); + sendBase("invalidate_all_column_statistics_req", args); + } + + public InvalidateColumnStatsResponse recv_invalidate_all_column_statistics_req() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + invalidate_all_column_statistics_req_result result = new invalidate_all_column_statistics_req_result(); + receiveBase(result, "invalidate_all_column_statistics_req"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "invalidate_all_column_statistics_req failed: unknown result"); + } + public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException { send_get_table_column_statistics(db_name, tbl_name, col_name); @@ -10890,6 +11007,102 @@ public SetPartitionsStatsResponse getResult() throws NoSuchObjectException, Inva } } + public void update_table_basic_statistics_req(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + update_table_basic_statistics_req_call method_call = new update_table_basic_statistics_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_basic_statistics_req_call 
extends org.apache.thrift.async.TAsyncMethodCall { + private SetBasicStatsRequest req; + public update_table_basic_statistics_req_call(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_table_basic_statistics_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + update_table_basic_statistics_req_args args = new update_table_basic_statistics_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public SetBasicStatsResponse getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_update_table_basic_statistics_req(); + } + } + + public void update_partition_basic_statistics_req(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + update_partition_basic_statistics_req_call method_call = new update_partition_basic_statistics_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + 
___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_basic_statistics_req_call extends org.apache.thrift.async.TAsyncMethodCall { + private SetBasicStatsRequest req; + public update_partition_basic_statistics_req_call(SetBasicStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_partition_basic_statistics_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + update_partition_basic_statistics_req_args args = new update_partition_basic_statistics_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public SetBasicStatsResponse getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_update_partition_basic_statistics_req(); + } + } + + public void invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException { + checkReady(); + invalidate_all_column_statistics_req_call method_call = new invalidate_all_column_statistics_req_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class invalidate_all_column_statistics_req_call extends org.apache.thrift.async.TAsyncMethodCall { + private InvalidateColumnStatsRequest req; + public invalidate_all_column_statistics_req_call(InvalidateColumnStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("invalidate_all_column_statistics_req", org.apache.thrift.protocol.TMessageType.CALL, 0)); + invalidate_all_column_statistics_req_args args = new invalidate_all_column_statistics_req_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public InvalidateColumnStatsResponse getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_invalidate_all_column_statistics_req(); + } + } + public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, resultHandler, this, ___protocolFactory, ___transport); @@ -14549,6 +14762,9 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public update_table_basic_statistics_req() { + super("update_table_basic_statistics_req"); + } + + public update_table_basic_statistics_req_args getEmptyArgsInstance() { + return new update_table_basic_statistics_req_args(); + } + + protected boolean isOneway() { + return false; + } + + public update_table_basic_statistics_req_result getResult(I iface, update_table_basic_statistics_req_args args) throws org.apache.thrift.TException { + update_table_basic_statistics_req_result result = new update_table_basic_statistics_req_result(); + try { + result.success = iface.update_table_basic_statistics_req(args.req); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } catch (InvalidInputException o4) { + result.o4 = o4; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_basic_statistics_req extends org.apache.thrift.ProcessFunction { + public update_partition_basic_statistics_req() { + super("update_partition_basic_statistics_req"); + } + + public update_partition_basic_statistics_req_args getEmptyArgsInstance() { + return new update_partition_basic_statistics_req_args(); + } + + protected boolean isOneway() { + return 
false; + } + + public update_partition_basic_statistics_req_result getResult(I iface, update_partition_basic_statistics_req_args args) throws org.apache.thrift.TException { + update_partition_basic_statistics_req_result result = new update_partition_basic_statistics_req_result(); + try { + result.success = iface.update_partition_basic_statistics_req(args.req); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } catch (InvalidInputException o4) { + result.o4 = o4; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class invalidate_all_column_statistics_req extends org.apache.thrift.ProcessFunction { + public invalidate_all_column_statistics_req() { + super("invalidate_all_column_statistics_req"); + } + + public invalidate_all_column_statistics_req_args getEmptyArgsInstance() { + return new invalidate_all_column_statistics_req_args(); + } + + protected boolean isOneway() { + return false; + } + + public invalidate_all_column_statistics_req_result getResult(I iface, invalidate_all_column_statistics_req_args args) throws org.apache.thrift.TException { + invalidate_all_column_statistics_req_result result = new invalidate_all_column_statistics_req_result(); + try { + result.success = iface.invalidate_all_column_statistics_req(args.req); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } catch (InvalidInputException o4) { + result.o4 = o4; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics extends org.apache.thrift.ProcessFunction { public get_table_column_statistics() { 
super("get_table_column_statistics"); @@ -20271,6 +20577,9 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { - public get_table_column_statistics() { - super("get_table_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_basic_statistics_req extends org.apache.thrift.AsyncProcessFunction { + public update_table_basic_statistics_req() { + super("update_table_basic_statistics_req"); } - public get_table_column_statistics_args getEmptyArgsInstance() { - return new get_table_column_statistics_args(); + public update_table_basic_statistics_req_args getEmptyArgsInstance() { + return new update_table_basic_statistics_req_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(ColumnStatistics o) { - get_table_column_statistics_result result = new get_table_column_statistics_result(); + return new AsyncMethodCallback() { + public void onComplete(SetBasicStatsResponse o) { + update_table_basic_statistics_req_result result = new update_table_basic_statistics_req_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -27112,24 +27421,24 @@ public void onComplete(ColumnStatistics o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_table_column_statistics_result result = new get_table_column_statistics_result(); + update_table_basic_statistics_req_result result = new update_table_basic_statistics_req_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); 
msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; result.setO2IsSet(true); msg = result; } - else if (e instanceof InvalidInputException) { - result.o3 = (InvalidInputException) e; + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; result.setO3IsSet(true); msg = result; } - else if (e instanceof InvalidObjectException) { - result.o4 = (InvalidObjectException) e; + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; result.setO4IsSet(true); msg = result; } @@ -27153,25 +27462,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + public void start(I iface, update_table_basic_statistics_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_table_basic_statistics_req(args.req,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public get_partition_column_statistics() { - super("get_partition_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_basic_statistics_req extends org.apache.thrift.AsyncProcessFunction { + public update_partition_basic_statistics_req() { + super("update_partition_basic_statistics_req"); } - public get_partition_column_statistics_args getEmptyArgsInstance() { - return new get_partition_column_statistics_args(); + public 
update_partition_basic_statistics_req_args getEmptyArgsInstance() { + return new update_partition_basic_statistics_req_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(ColumnStatistics o) { - get_partition_column_statistics_result result = new get_partition_column_statistics_result(); + return new AsyncMethodCallback() { + public void onComplete(SetBasicStatsResponse o) { + update_partition_basic_statistics_req_result result = new update_partition_basic_statistics_req_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -27184,24 +27493,240 @@ public void onComplete(ColumnStatistics o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partition_column_statistics_result result = new get_partition_column_statistics_result(); + update_partition_basic_statistics_req_result result = new update_partition_basic_statistics_req_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; result.setO2IsSet(true); msg = result; } - else if (e instanceof InvalidInputException) { - result.o3 = (InvalidInputException) e; + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; result.setO3IsSet(true); msg = result; } - else if (e instanceof InvalidObjectException) { - result.o4 = (InvalidObjectException) e; + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + 
result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, update_partition_basic_statistics_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_partition_basic_statistics_req(args.req,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class invalidate_all_column_statistics_req extends org.apache.thrift.AsyncProcessFunction { + public invalidate_all_column_statistics_req() { + super("invalidate_all_column_statistics_req"); + } + + public invalidate_all_column_statistics_req_args getEmptyArgsInstance() { + return new invalidate_all_column_statistics_req_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(InvalidateColumnStatsResponse o) { + invalidate_all_column_statistics_req_result result = new invalidate_all_column_statistics_req_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + invalidate_all_column_statistics_req_result 
result = new invalidate_all_column_statistics_req_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, invalidate_all_column_statistics_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.invalidate_all_column_statistics_req(args.req,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public get_table_column_statistics() { + super("get_table_column_statistics"); + } + + public get_table_column_statistics_args getEmptyArgsInstance() { + return new get_table_column_statistics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(ColumnStatistics o) { + get_table_column_statistics_result 
result = new get_table_column_statistics_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_table_column_statistics_result result = new get_table_column_statistics_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o3 = (InvalidInputException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o4 = (InvalidObjectException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public 
get_partition_column_statistics() { + super("get_partition_column_statistics"); + } + + public get_partition_column_statistics_args getEmptyArgsInstance() { + return new get_partition_column_statistics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(ColumnStatistics o) { + get_partition_column_statistics_result result = new get_partition_column_statistics_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partition_column_statistics_result result = new get_partition_column_statistics_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o3 = (InvalidInputException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o4 = (InvalidObjectException) e; result.setO4IsSet(true); msg = result; } @@ -43344,13 +43869,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); - struct.success = new ArrayList(_list968.size); - String _elem969; - for (int _i970 = 0; _i970 < _list968.size; ++_i970) + org.apache.thrift.protocol.TList _list978 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list978.size); + String _elem979; + for (int _i980 = 0; _i980 < _list978.size; ++_i980) { - _elem969 = iprot.readString(); - struct.success.add(_elem969); + _elem979 = iprot.readString(); + struct.success.add(_elem979); } iprot.readListEnd(); } @@ -43385,9 +43910,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter971 : struct.success) + for (String _iter981 : struct.success) { - oprot.writeString(_iter971); + oprot.writeString(_iter981); } oprot.writeListEnd(); } @@ -43426,9 +43951,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter972 : struct.success) + for (String _iter982 : struct.success) { - oprot.writeString(_iter972); + oprot.writeString(_iter982); } } } @@ -43443,13 +43968,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list973.size); - String _elem974; - for (int _i975 = 0; _i975 < _list973.size; ++_i975) + org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list983.size); + String _elem984; + for (int _i985 = 0; _i985 < _list983.size; ++_i985) { - _elem974 = iprot.readString(); - struct.success.add(_elem974); + _elem984 = iprot.readString(); + struct.success.add(_elem984); } } struct.setSuccessIsSet(true); @@ -44103,13 +44628,13 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); - struct.success = new ArrayList(_list976.size); - String _elem977; - for (int _i978 = 0; _i978 < _list976.size; ++_i978) + org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); + struct.success = new ArrayList(_list986.size); + String _elem987; + for (int _i988 = 0; _i988 < _list986.size; ++_i988) { - _elem977 = iprot.readString(); - struct.success.add(_elem977); + _elem987 = iprot.readString(); + struct.success.add(_elem987); } iprot.readListEnd(); } @@ -44144,9 +44669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter979 : struct.success) + for (String _iter989 : struct.success) { - oprot.writeString(_iter979); + oprot.writeString(_iter989); } oprot.writeListEnd(); } @@ -44185,9 +44710,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter980 : struct.success) + for (String _iter990 : struct.success) { - oprot.writeString(_iter980); + oprot.writeString(_iter990); } } } @@ -44202,13 +44727,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list981.size); - String _elem982; - for (int _i983 = 0; _i983 < _list981.size; ++_i983) + org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); + struct.success = new ArrayList(_list991.size); + String _elem992; + for (int _i993 = 0; _i993 < _list991.size; ++_i993) { - _elem982 = iprot.readString(); - struct.success.add(_elem982); + _elem992 = iprot.readString(); + struct.success.add(_elem992); } } struct.setSuccessIsSet(true); @@ -48815,16 +49340,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map984.size); - String _key985; - Type _val986; - for (int _i987 = 0; _i987 < _map984.size; ++_i987) + org.apache.thrift.protocol.TMap _map994 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map994.size); + String _key995; + Type _val996; + for (int _i997 = 0; _i997 < _map994.size; ++_i997) { - _key985 = iprot.readString(); - _val986 = new Type(); - _val986.read(iprot); - struct.success.put(_key985, _val986); + _key995 = iprot.readString(); + _val996 = new Type(); + _val996.read(iprot); + struct.success.put(_key995, _val996); } iprot.readMapEnd(); } @@ -48859,10 +49384,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter988 : struct.success.entrySet()) + for (Map.Entry _iter998 : struct.success.entrySet()) { - oprot.writeString(_iter988.getKey()); - _iter988.getValue().write(oprot); + oprot.writeString(_iter998.getKey()); + _iter998.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -48901,10 +49426,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter989 : struct.success.entrySet()) 
+ for (Map.Entry _iter999 : struct.success.entrySet()) { - oprot.writeString(_iter989.getKey()); - _iter989.getValue().write(oprot); + oprot.writeString(_iter999.getKey()); + _iter999.getValue().write(oprot); } } } @@ -48919,16 +49444,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map990.size); - String _key991; - Type _val992; - for (int _i993 = 0; _i993 < _map990.size; ++_i993) + org.apache.thrift.protocol.TMap _map1000 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1000.size); + String _key1001; + Type _val1002; + for (int _i1003 = 0; _i1003 < _map1000.size; ++_i1003) { - _key991 = iprot.readString(); - _val992 = new Type(); - _val992.read(iprot); - struct.success.put(_key991, _val992); + _key1001 = iprot.readString(); + _val1002 = new Type(); + _val1002.read(iprot); + struct.success.put(_key1001, _val1002); } } struct.setSuccessIsSet(true); @@ -49963,14 +50488,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); - struct.success = new ArrayList(_list994.size); - FieldSchema _elem995; - for (int _i996 = 0; _i996 < _list994.size; ++_i996) + org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); + struct.success = new ArrayList(_list1004.size); + FieldSchema _elem1005; + for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) { - _elem995 = new FieldSchema(); - _elem995.read(iprot); - struct.success.add(_elem995); + _elem1005 = 
new FieldSchema(); + _elem1005.read(iprot); + struct.success.add(_elem1005); } iprot.readListEnd(); } @@ -50023,9 +50548,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter997 : struct.success) + for (FieldSchema _iter1007 : struct.success) { - _iter997.write(oprot); + _iter1007.write(oprot); } oprot.writeListEnd(); } @@ -50080,9 +50605,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter998 : struct.success) + for (FieldSchema _iter1008 : struct.success) { - _iter998.write(oprot); + _iter1008.write(oprot); } } } @@ -50103,14 +50628,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list999.size); - FieldSchema _elem1000; - for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) + org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1009.size); + FieldSchema _elem1010; + for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) { - _elem1000 = new FieldSchema(); - _elem1000.read(iprot); - struct.success.add(_elem1000); + _elem1010 = new FieldSchema(); + _elem1010.read(iprot); + struct.success.add(_elem1010); } } struct.setSuccessIsSet(true); @@ -51264,14 +51789,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); - struct.success = new ArrayList(_list1002.size); - FieldSchema _elem1003; - for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) + org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); + struct.success = new ArrayList(_list1012.size); + FieldSchema _elem1013; + for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) { - _elem1003 = new FieldSchema(); - _elem1003.read(iprot); - struct.success.add(_elem1003); + _elem1013 = new FieldSchema(); + _elem1013.read(iprot); + struct.success.add(_elem1013); } iprot.readListEnd(); } @@ -51324,9 +51849,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1005 : struct.success) + for (FieldSchema _iter1015 : struct.success) { - _iter1005.write(oprot); + _iter1015.write(oprot); } oprot.writeListEnd(); } @@ -51381,9 +51906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1006 : struct.success) + for (FieldSchema _iter1016 : struct.success) { - _iter1006.write(oprot); + _iter1016.write(oprot); } } } @@ -51404,14 +51929,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1007.size); - FieldSchema _elem1008; - for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) + org.apache.thrift.protocol.TList _list1017 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1017.size); + FieldSchema _elem1018; + for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) { - _elem1008 = new FieldSchema(); - _elem1008.read(iprot); - struct.success.add(_elem1008); + _elem1018 = new FieldSchema(); + _elem1018.read(iprot); + struct.success.add(_elem1018); } } struct.setSuccessIsSet(true); @@ -52456,14 +52981,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); - struct.success = new ArrayList(_list1010.size); - FieldSchema _elem1011; - for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) + org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); + struct.success = new ArrayList(_list1020.size); + FieldSchema _elem1021; + for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) { - _elem1011 = new FieldSchema(); - _elem1011.read(iprot); - struct.success.add(_elem1011); + _elem1021 = new FieldSchema(); + _elem1021.read(iprot); + struct.success.add(_elem1021); } iprot.readListEnd(); } @@ -52516,9 +53041,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1013 : struct.success) + for (FieldSchema _iter1023 : struct.success) { - _iter1013.write(oprot); + _iter1023.write(oprot); } oprot.writeListEnd(); } @@ -52573,9 +53098,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1014 : struct.success) + for (FieldSchema _iter1024 : struct.success) { - _iter1014.write(oprot); + 
_iter1024.write(oprot); } } } @@ -52596,14 +53121,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1015.size); - FieldSchema _elem1016; - for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) + org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1025.size); + FieldSchema _elem1026; + for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) { - _elem1016 = new FieldSchema(); - _elem1016.read(iprot); - struct.success.add(_elem1016); + _elem1026 = new FieldSchema(); + _elem1026.read(iprot); + struct.success.add(_elem1026); } } struct.setSuccessIsSet(true); @@ -53757,14 +54282,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); - struct.success = new ArrayList(_list1018.size); - FieldSchema _elem1019; - for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) + org.apache.thrift.protocol.TList _list1028 = iprot.readListBegin(); + struct.success = new ArrayList(_list1028.size); + FieldSchema _elem1029; + for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030) { - _elem1019 = new FieldSchema(); - _elem1019.read(iprot); - struct.success.add(_elem1019); + _elem1029 = new FieldSchema(); + _elem1029.read(iprot); + struct.success.add(_elem1029); } iprot.readListEnd(); } @@ -53817,9 +54342,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1021 : struct.success) + for (FieldSchema _iter1031 : struct.success) { - _iter1021.write(oprot); + _iter1031.write(oprot); } oprot.writeListEnd(); } @@ -53874,9 +54399,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1022 : struct.success) + for (FieldSchema _iter1032 : struct.success) { - _iter1022.write(oprot); + _iter1032.write(oprot); } } } @@ -53897,14 +54422,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1023.size); - FieldSchema _elem1024; - for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) + org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1033.size); + FieldSchema _elem1034; + for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) { - _elem1024 = new FieldSchema(); - _elem1024.read(iprot); - struct.success.add(_elem1024); + _elem1034 = new FieldSchema(); + _elem1034.read(iprot); + struct.success.add(_elem1034); } } struct.setSuccessIsSet(true); @@ -57033,14 +57558,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1026.size); - SQLPrimaryKey _elem1027; - for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) + org.apache.thrift.protocol.TList _list1036 = 
iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1036.size); + SQLPrimaryKey _elem1037; + for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) { - _elem1027 = new SQLPrimaryKey(); - _elem1027.read(iprot); - struct.primaryKeys.add(_elem1027); + _elem1037 = new SQLPrimaryKey(); + _elem1037.read(iprot); + struct.primaryKeys.add(_elem1037); } iprot.readListEnd(); } @@ -57052,14 +57577,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1029 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1029.size); - SQLForeignKey _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1039 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1039.size); + SQLForeignKey _elem1040; + for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) { - _elem1030 = new SQLForeignKey(); - _elem1030.read(iprot); - struct.foreignKeys.add(_elem1030); + _elem1040 = new SQLForeignKey(); + _elem1040.read(iprot); + struct.foreignKeys.add(_elem1040); } iprot.readListEnd(); } @@ -57071,14 +57596,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1032.size); - SQLUniqueConstraint _elem1033; - for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1042.size); + SQLUniqueConstraint _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1033 = new SQLUniqueConstraint(); - _elem1033.read(iprot); - struct.uniqueConstraints.add(_elem1033); + _elem1043 = new 
SQLUniqueConstraint(); + _elem1043.read(iprot); + struct.uniqueConstraints.add(_elem1043); } iprot.readListEnd(); } @@ -57090,14 +57615,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1035 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1035.size); - SQLNotNullConstraint _elem1036; - for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037) + org.apache.thrift.protocol.TList _list1045 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1045.size); + SQLNotNullConstraint _elem1046; + for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) { - _elem1036 = new SQLNotNullConstraint(); - _elem1036.read(iprot); - struct.notNullConstraints.add(_elem1036); + _elem1046 = new SQLNotNullConstraint(); + _elem1046.read(iprot); + struct.notNullConstraints.add(_elem1046); } iprot.readListEnd(); } @@ -57109,14 +57634,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1038.size); - SQLDefaultConstraint _elem1039; - for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040) + org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1048.size); + SQLDefaultConstraint _elem1049; + for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) { - _elem1039 = new SQLDefaultConstraint(); - _elem1039.read(iprot); - struct.defaultConstraints.add(_elem1039); + _elem1049 = new SQLDefaultConstraint(); + _elem1049.read(iprot); + struct.defaultConstraints.add(_elem1049); } iprot.readListEnd(); } @@ -57128,14 +57653,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1041 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1041.size); - SQLCheckConstraint _elem1042; - for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) + org.apache.thrift.protocol.TList _list1051 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1051.size); + SQLCheckConstraint _elem1052; + for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053) { - _elem1042 = new SQLCheckConstraint(); - _elem1042.read(iprot); - struct.checkConstraints.add(_elem1042); + _elem1052 = new SQLCheckConstraint(); + _elem1052.read(iprot); + struct.checkConstraints.add(_elem1052); } iprot.readListEnd(); } @@ -57166,9 +57691,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1044 : struct.primaryKeys) + for (SQLPrimaryKey _iter1054 : struct.primaryKeys) { - _iter1044.write(oprot); + _iter1054.write(oprot); } oprot.writeListEnd(); } @@ -57178,9 +57703,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1045 : struct.foreignKeys) + for (SQLForeignKey _iter1055 : struct.foreignKeys) { - _iter1045.write(oprot); + _iter1055.write(oprot); } oprot.writeListEnd(); } @@ -57190,9 +57715,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1046 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1056 : struct.uniqueConstraints) { - _iter1046.write(oprot); + _iter1056.write(oprot); } oprot.writeListEnd(); } @@ -57202,9 +57727,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1047 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1057 : struct.notNullConstraints) { - _iter1047.write(oprot); + _iter1057.write(oprot); } oprot.writeListEnd(); } @@ -57214,9 +57739,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1048 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1058 : struct.defaultConstraints) { - _iter1048.write(oprot); + _iter1058.write(oprot); } oprot.writeListEnd(); } @@ -57226,9 +57751,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1049 : struct.checkConstraints) + for (SQLCheckConstraint _iter1059 : struct.checkConstraints) { - _iter1049.write(oprot); + _iter1059.write(oprot); } oprot.writeListEnd(); } @@ -57280,54 +57805,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1050 : 
struct.primaryKeys) + for (SQLPrimaryKey _iter1060 : struct.primaryKeys) { - _iter1050.write(oprot); + _iter1060.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1051 : struct.foreignKeys) + for (SQLForeignKey _iter1061 : struct.foreignKeys) { - _iter1051.write(oprot); + _iter1061.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1052 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1062 : struct.uniqueConstraints) { - _iter1052.write(oprot); + _iter1062.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1053 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1063 : struct.notNullConstraints) { - _iter1053.write(oprot); + _iter1063.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1054 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1064 : struct.defaultConstraints) { - _iter1054.write(oprot); + _iter1064.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1055 : struct.checkConstraints) + for (SQLCheckConstraint _iter1065 : struct.checkConstraints) { - _iter1055.write(oprot); + _iter1065.write(oprot); } } } @@ -57344,84 +57869,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1056 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1056.size); - SQLPrimaryKey _elem1057; - for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) + org.apache.thrift.protocol.TList _list1066 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1066.size); + SQLPrimaryKey _elem1067; + for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) { - _elem1057 = new SQLPrimaryKey(); - _elem1057.read(iprot); - struct.primaryKeys.add(_elem1057); + _elem1067 = new SQLPrimaryKey(); + _elem1067.read(iprot); + struct.primaryKeys.add(_elem1067); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1059.size); - SQLForeignKey _elem1060; - for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) + org.apache.thrift.protocol.TList _list1069 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1069.size); + SQLForeignKey _elem1070; + for (int _i1071 = 0; _i1071 < _list1069.size; ++_i1071) { - _elem1060 = new SQLForeignKey(); - _elem1060.read(iprot); - struct.foreignKeys.add(_elem1060); + _elem1070 = new SQLForeignKey(); + _elem1070.read(iprot); + struct.foreignKeys.add(_elem1070); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1062 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1062.size); - SQLUniqueConstraint _elem1063; - for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064) + org.apache.thrift.protocol.TList _list1072 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1072.size); + SQLUniqueConstraint _elem1073; + for (int _i1074 = 0; _i1074 < _list1072.size; ++_i1074) { - _elem1063 = new SQLUniqueConstraint(); - _elem1063.read(iprot); - 
struct.uniqueConstraints.add(_elem1063); + _elem1073 = new SQLUniqueConstraint(); + _elem1073.read(iprot); + struct.uniqueConstraints.add(_elem1073); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1065.size); - SQLNotNullConstraint _elem1066; - for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) + org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1075.size); + SQLNotNullConstraint _elem1076; + for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077) { - _elem1066 = new SQLNotNullConstraint(); - _elem1066.read(iprot); - struct.notNullConstraints.add(_elem1066); + _elem1076 = new SQLNotNullConstraint(); + _elem1076.read(iprot); + struct.notNullConstraints.add(_elem1076); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1068 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1068.size); - SQLDefaultConstraint _elem1069; - for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) + org.apache.thrift.protocol.TList _list1078 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1078.size); + SQLDefaultConstraint _elem1079; + for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) { - _elem1069 = new SQLDefaultConstraint(); - _elem1069.read(iprot); - struct.defaultConstraints.add(_elem1069); + _elem1079 = new SQLDefaultConstraint(); + _elem1079.read(iprot); + struct.defaultConstraints.add(_elem1079); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - 
org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1071.size); - SQLCheckConstraint _elem1072; - for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) + org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1081.size); + SQLCheckConstraint _elem1082; + for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) { - _elem1072 = new SQLCheckConstraint(); - _elem1072.read(iprot); - struct.checkConstraints.add(_elem1072); + _elem1082 = new SQLCheckConstraint(); + _elem1082.read(iprot); + struct.checkConstraints.add(_elem1082); } } struct.setCheckConstraintsIsSet(true); @@ -66571,13 +67096,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1074.size); - String _elem1075; - for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) + org.apache.thrift.protocol.TList _list1084 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1084.size); + String _elem1085; + for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) { - _elem1075 = iprot.readString(); - struct.partNames.add(_elem1075); + _elem1085 = iprot.readString(); + struct.partNames.add(_elem1085); } iprot.readListEnd(); } @@ -66613,9 +67138,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1077 : struct.partNames) + for (String _iter1087 : struct.partNames) { - 
oprot.writeString(_iter1077); + oprot.writeString(_iter1087); } oprot.writeListEnd(); } @@ -66658,9 +67183,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1078 : struct.partNames) + for (String _iter1088 : struct.partNames) { - oprot.writeString(_iter1078); + oprot.writeString(_iter1088); } } } @@ -66680,13 +67205,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1079.size); - String _elem1080; - for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) + org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1089.size); + String _elem1090; + for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) { - _elem1080 = iprot.readString(); - struct.partNames.add(_elem1080); + _elem1090 = iprot.readString(); + struct.partNames.add(_elem1090); } } struct.setPartNamesIsSet(true); @@ -68743,13 +69268,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); - struct.success = new ArrayList(_list1082.size); - String _elem1083; - for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) + org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); + struct.success = new ArrayList(_list1092.size); + String _elem1093; + for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) { - _elem1083 = iprot.readString(); - struct.success.add(_elem1083); + _elem1093 = iprot.readString(); + 
struct.success.add(_elem1093); } iprot.readListEnd(); } @@ -68784,9 +69309,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1085 : struct.success) + for (String _iter1095 : struct.success) { - oprot.writeString(_iter1085); + oprot.writeString(_iter1095); } oprot.writeListEnd(); } @@ -68825,9 +69350,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1086 : struct.success) + for (String _iter1096 : struct.success) { - oprot.writeString(_iter1086); + oprot.writeString(_iter1096); } } } @@ -68842,13 +69367,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1087.size); - String _elem1088; - for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) + org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1097.size); + String _elem1098; + for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) { - _elem1088 = iprot.readString(); - struct.success.add(_elem1088); + _elem1098 = iprot.readString(); + struct.success.add(_elem1098); } } struct.setSuccessIsSet(true); @@ -69822,13 +70347,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin(); - struct.success 
= new ArrayList(_list1090.size); - String _elem1091; - for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) + org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); + struct.success = new ArrayList(_list1100.size); + String _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1091 = iprot.readString(); - struct.success.add(_elem1091); + _elem1101 = iprot.readString(); + struct.success.add(_elem1101); } iprot.readListEnd(); } @@ -69863,9 +70388,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1093 : struct.success) + for (String _iter1103 : struct.success) { - oprot.writeString(_iter1093); + oprot.writeString(_iter1103); } oprot.writeListEnd(); } @@ -69904,9 +70429,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1094 : struct.success) + for (String _iter1104 : struct.success) { - oprot.writeString(_iter1094); + oprot.writeString(_iter1104); } } } @@ -69921,13 +70446,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1095.size); - String _elem1096; - for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097) + org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1105.size); + String _elem1106; + for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) { - _elem1096 = iprot.readString(); 
- struct.success.add(_elem1096); + _elem1106 = iprot.readString(); + struct.success.add(_elem1106); } } struct.setSuccessIsSet(true); @@ -70693,13 +71218,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin(); - struct.success = new ArrayList(_list1098.size); - String _elem1099; - for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100) + org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); + struct.success = new ArrayList(_list1108.size); + String _elem1109; + for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) { - _elem1099 = iprot.readString(); - struct.success.add(_elem1099); + _elem1109 = iprot.readString(); + struct.success.add(_elem1109); } iprot.readListEnd(); } @@ -70734,9 +71259,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1101 : struct.success) + for (String _iter1111 : struct.success) { - oprot.writeString(_iter1101); + oprot.writeString(_iter1111); } oprot.writeListEnd(); } @@ -70775,9 +71300,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1102 : struct.success) + for (String _iter1112 : struct.success) { - oprot.writeString(_iter1102); + oprot.writeString(_iter1112); } } } @@ -70792,13 +71317,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = 
new ArrayList(_list1103.size); - String _elem1104; - for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) + org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1113.size); + String _elem1114; + for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) { - _elem1104 = iprot.readString(); - struct.success.add(_elem1104); + _elem1114 = iprot.readString(); + struct.success.add(_elem1114); } } struct.setSuccessIsSet(true); @@ -71303,13 +71828,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1106.size); - String _elem1107; - for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) + org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1116.size); + String _elem1117; + for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) { - _elem1107 = iprot.readString(); - struct.tbl_types.add(_elem1107); + _elem1117 = iprot.readString(); + struct.tbl_types.add(_elem1117); } iprot.readListEnd(); } @@ -71345,9 +71870,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1109 : struct.tbl_types) + for (String _iter1119 : struct.tbl_types) { - oprot.writeString(_iter1109); + oprot.writeString(_iter1119); } oprot.writeListEnd(); } @@ -71390,9 +71915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1110 : struct.tbl_types) + for 
(String _iter1120 : struct.tbl_types) { - oprot.writeString(_iter1110); + oprot.writeString(_iter1120); } } } @@ -71412,13 +71937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1111.size); - String _elem1112; - for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) + org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1121.size); + String _elem1122; + for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) { - _elem1112 = iprot.readString(); - struct.tbl_types.add(_elem1112); + _elem1122 = iprot.readString(); + struct.tbl_types.add(_elem1122); } } struct.setTbl_typesIsSet(true); @@ -71824,14 +72349,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); - struct.success = new ArrayList(_list1114.size); - TableMeta _elem1115; - for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) + org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); + struct.success = new ArrayList(_list1124.size); + TableMeta _elem1125; + for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) { - _elem1115 = new TableMeta(); - _elem1115.read(iprot); - struct.success.add(_elem1115); + _elem1125 = new TableMeta(); + _elem1125.read(iprot); + struct.success.add(_elem1125); } iprot.readListEnd(); } @@ -71866,9 +72391,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1117 : struct.success) + for (TableMeta _iter1127 : struct.success) { - _iter1117.write(oprot); + _iter1127.write(oprot); } oprot.writeListEnd(); } @@ -71907,9 +72432,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1118 : struct.success) + for (TableMeta _iter1128 : struct.success) { - _iter1118.write(oprot); + _iter1128.write(oprot); } } } @@ -71924,14 +72449,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1119.size); - TableMeta _elem1120; - for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) + org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1129.size); + TableMeta _elem1130; + for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) { - _elem1120 = new TableMeta(); - _elem1120.read(iprot); - struct.success.add(_elem1120); + _elem1130 = new TableMeta(); + _elem1130.read(iprot); + struct.success.add(_elem1130); } } struct.setSuccessIsSet(true); @@ -72697,13 +73222,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); - struct.success = new ArrayList(_list1122.size); - String _elem1123; - for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1132.size); + String _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1123 = iprot.readString(); - struct.success.add(_elem1123); + _elem1133 = iprot.readString(); + struct.success.add(_elem1133); } iprot.readListEnd(); } @@ -72738,9 +73263,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1125 : struct.success) + for (String _iter1135 : struct.success) { - oprot.writeString(_iter1125); + oprot.writeString(_iter1135); } oprot.writeListEnd(); } @@ -72779,9 +73304,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1126 : struct.success) + for (String _iter1136 : struct.success) { - oprot.writeString(_iter1126); + oprot.writeString(_iter1136); } } } @@ -72796,13 +73321,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1127.size); - String _elem1128; - for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1137.size); + String _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1128 = iprot.readString(); - struct.success.add(_elem1128); + _elem1138 = iprot.readString(); + struct.success.add(_elem1138); } } struct.setSuccessIsSet(true); @@ -74255,13 +74780,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1130.size); - String _elem1131; - for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) + org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1140.size); + String _elem1141; + for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) { - _elem1131 = iprot.readString(); - struct.tbl_names.add(_elem1131); + _elem1141 = iprot.readString(); + struct.tbl_names.add(_elem1141); } iprot.readListEnd(); } @@ -74292,9 +74817,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1133 : struct.tbl_names) + for (String _iter1143 : struct.tbl_names) { - oprot.writeString(_iter1133); + oprot.writeString(_iter1143); } oprot.writeListEnd(); } @@ -74331,9 +74856,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1134 : struct.tbl_names) + for (String _iter1144 : struct.tbl_names) { - oprot.writeString(_iter1134); + oprot.writeString(_iter1144); } } } @@ -74349,13 +74874,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1135.size); - String _elem1136; - for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) + org.apache.thrift.protocol.TList _list1145 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1145.size); + String _elem1146; + for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) { - _elem1136 = iprot.readString(); - struct.tbl_names.add(_elem1136); + _elem1146 = iprot.readString(); + struct.tbl_names.add(_elem1146); } } struct.setTbl_namesIsSet(true); @@ -74680,14 +75205,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); - struct.success = new ArrayList
(_list1138.size); - Table _elem1139; - for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) + org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1148.size); + Table _elem1149; + for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) { - _elem1139 = new Table(); - _elem1139.read(iprot); - struct.success.add(_elem1139); + _elem1149 = new Table(); + _elem1149.read(iprot); + struct.success.add(_elem1149); } iprot.readListEnd(); } @@ -74713,9 +75238,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1141 : struct.success) + for (Table _iter1151 : struct.success) { - _iter1141.write(oprot); + _iter1151.write(oprot); } oprot.writeListEnd(); } @@ -74746,9 +75271,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1142 : struct.success) + for (Table _iter1152 : struct.success) { - _iter1142.write(oprot); + _iter1152.write(oprot); } } } @@ -74760,14 +75285,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1143.size); - Table _elem1144; - for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) + org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1153.size); + Table _elem1154; + for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) { - _elem1144 = new Table(); - _elem1144.read(iprot); - struct.success.add(_elem1144); + _elem1154 = new Table(); + _elem1154.read(iprot); + struct.success.add(_elem1154); } } struct.setSuccessIsSet(true); @@ -80275,13 +80800,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); - struct.success = new ArrayList(_list1146.size); - String _elem1147; - for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) + org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); + struct.success = new ArrayList(_list1156.size); + String _elem1157; + for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) { - _elem1147 = iprot.readString(); - struct.success.add(_elem1147); + _elem1157 = iprot.readString(); + struct.success.add(_elem1157); } iprot.readListEnd(); } @@ -80334,9 +80859,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1149 : struct.success) + for (String _iter1159 : struct.success) { - oprot.writeString(_iter1149); + oprot.writeString(_iter1159); } oprot.writeListEnd(); } @@ -80391,9 +80916,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1150 : struct.success) + for (String _iter1160 : struct.success) { - oprot.writeString(_iter1150); + oprot.writeString(_iter1160); } } } @@ -80414,13 +80939,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) 
{ { - org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1151.size); - String _elem1152; - for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) + org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1161.size); + String _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1152 = iprot.readString(); - struct.success.add(_elem1152); + _elem1162 = iprot.readString(); + struct.success.add(_elem1162); } } struct.setSuccessIsSet(true); @@ -87217,14 +87742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1154.size); - Partition _elem1155; - for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) + org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1164.size); + Partition _elem1165; + for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) { - _elem1155 = new Partition(); - _elem1155.read(iprot); - struct.new_parts.add(_elem1155); + _elem1165 = new Partition(); + _elem1165.read(iprot); + struct.new_parts.add(_elem1165); } iprot.readListEnd(); } @@ -87250,9 +87775,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1157 : struct.new_parts) + for (Partition _iter1167 : struct.new_parts) { - _iter1157.write(oprot); + _iter1167.write(oprot); } oprot.writeListEnd(); } @@ -87283,9 +87808,9 
@@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1158 : struct.new_parts) + for (Partition _iter1168 : struct.new_parts) { - _iter1158.write(oprot); + _iter1168.write(oprot); } } } @@ -87297,14 +87822,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1159.size); - Partition _elem1160; - for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) + org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1169.size); + Partition _elem1170; + for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) { - _elem1160 = new Partition(); - _elem1160.read(iprot); - struct.new_parts.add(_elem1160); + _elem1170 = new Partition(); + _elem1170.read(iprot); + struct.new_parts.add(_elem1170); } } struct.setNew_partsIsSet(true); @@ -88305,14 +88830,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1162.size); - PartitionSpec _elem1163; - for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) + org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1172.size); + PartitionSpec _elem1173; + for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) { - _elem1163 = new PartitionSpec(); - _elem1163.read(iprot); - struct.new_parts.add(_elem1163); + _elem1173 = new PartitionSpec(); + 
_elem1173.read(iprot); + struct.new_parts.add(_elem1173); } iprot.readListEnd(); } @@ -88338,9 +88863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1165 : struct.new_parts) + for (PartitionSpec _iter1175 : struct.new_parts) { - _iter1165.write(oprot); + _iter1175.write(oprot); } oprot.writeListEnd(); } @@ -88371,9 +88896,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1166 : struct.new_parts) + for (PartitionSpec _iter1176 : struct.new_parts) { - _iter1166.write(oprot); + _iter1176.write(oprot); } } } @@ -88385,14 +88910,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1167 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1167.size); - PartitionSpec _elem1168; - for (int _i1169 = 0; _i1169 < _list1167.size; ++_i1169) + org.apache.thrift.protocol.TList _list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1177.size); + PartitionSpec _elem1178; + for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) { - _elem1168 = new PartitionSpec(); - _elem1168.read(iprot); - struct.new_parts.add(_elem1168); + _elem1178 = new PartitionSpec(); + _elem1178.read(iprot); + struct.new_parts.add(_elem1178); } } struct.setNew_partsIsSet(true); @@ -89568,13 +90093,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1170.size); - String _elem1171; - for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) + org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1180.size); + String _elem1181; + for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) { - _elem1171 = iprot.readString(); - struct.part_vals.add(_elem1171); + _elem1181 = iprot.readString(); + struct.part_vals.add(_elem1181); } iprot.readListEnd(); } @@ -89610,9 +90135,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1173 : struct.part_vals) + for (String _iter1183 : struct.part_vals) { - oprot.writeString(_iter1173); + oprot.writeString(_iter1183); } oprot.writeListEnd(); } @@ -89655,9 +90180,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1174 : struct.part_vals) + for (String _iter1184 : struct.part_vals) { - oprot.writeString(_iter1174); + oprot.writeString(_iter1184); } } } @@ -89677,13 +90202,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1175.size); - String _elem1176; - for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) + org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list1185.size); + String _elem1186; + for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) { - _elem1176 = iprot.readString(); - struct.part_vals.add(_elem1176); + _elem1186 = iprot.readString(); + struct.part_vals.add(_elem1186); } } struct.setPart_valsIsSet(true); @@ -91992,13 +92517,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1178.size); - String _elem1179; - for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) + org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1188.size); + String _elem1189; + for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) { - _elem1179 = iprot.readString(); - struct.part_vals.add(_elem1179); + _elem1189 = iprot.readString(); + struct.part_vals.add(_elem1189); } iprot.readListEnd(); } @@ -92043,9 +92568,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1181 : struct.part_vals) + for (String _iter1191 : struct.part_vals) { - oprot.writeString(_iter1181); + oprot.writeString(_iter1191); } oprot.writeListEnd(); } @@ -92096,9 +92621,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1182 : struct.part_vals) + for (String _iter1192 : struct.part_vals) { - oprot.writeString(_iter1182); + oprot.writeString(_iter1192); } } } @@ -92121,13 +92646,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - 
org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1183.size); - String _elem1184; - for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) + org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1193.size); + String _elem1194; + for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) { - _elem1184 = iprot.readString(); - struct.part_vals.add(_elem1184); + _elem1194 = iprot.readString(); + struct.part_vals.add(_elem1194); } } struct.setPart_valsIsSet(true); @@ -95997,13 +96522,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1186.size); - String _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1196 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1196.size); + String _elem1197; + for (int _i1198 = 0; _i1198 < _list1196.size; ++_i1198) { - _elem1187 = iprot.readString(); - struct.part_vals.add(_elem1187); + _elem1197 = iprot.readString(); + struct.part_vals.add(_elem1197); } iprot.readListEnd(); } @@ -96047,9 +96572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1189 : struct.part_vals) + for (String _iter1199 : struct.part_vals) { - oprot.writeString(_iter1189); + oprot.writeString(_iter1199); } oprot.writeListEnd(); } @@ -96098,9 +96623,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1190 : struct.part_vals) + for (String _iter1200 : struct.part_vals) { - oprot.writeString(_iter1190); + oprot.writeString(_iter1200); } } } @@ -96123,13 +96648,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1191.size); - String _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1201.size); + String _elem1202; + for (int _i1203 = 0; _i1203 < _list1201.size; ++_i1203) { - _elem1192 = iprot.readString(); - struct.part_vals.add(_elem1192); + _elem1202 = iprot.readString(); + struct.part_vals.add(_elem1202); } } struct.setPart_valsIsSet(true); @@ -97368,13 +97893,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1194.size); - String _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1204 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1204.size); + String _elem1205; + for (int _i1206 = 0; _i1206 < _list1204.size; ++_i1206) { - _elem1195 = iprot.readString(); - struct.part_vals.add(_elem1195); + _elem1205 = iprot.readString(); + struct.part_vals.add(_elem1205); } iprot.readListEnd(); } @@ -97427,9 +97952,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1197 : struct.part_vals) + for (String _iter1207 : struct.part_vals) { - oprot.writeString(_iter1197); + oprot.writeString(_iter1207); } oprot.writeListEnd(); } @@ -97486,9 +98011,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1198 : struct.part_vals) + for (String _iter1208 : struct.part_vals) { - oprot.writeString(_iter1198); + oprot.writeString(_iter1208); } } } @@ -97514,13 +98039,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1199.size); - String _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1209.size); + String _elem1210; + for (int _i1211 = 0; _i1211 < _list1209.size; ++_i1211) { - _elem1200 = iprot.readString(); - struct.part_vals.add(_elem1200); + _elem1210 = iprot.readString(); + struct.part_vals.add(_elem1210); } } struct.setPart_valsIsSet(true); @@ -102122,13 +102647,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1202.size); - String _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + 
org.apache.thrift.protocol.TList _list1212 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1212.size); + String _elem1213; + for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) { - _elem1203 = iprot.readString(); - struct.part_vals.add(_elem1203); + _elem1213 = iprot.readString(); + struct.part_vals.add(_elem1213); } iprot.readListEnd(); } @@ -102164,9 +102689,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1205 : struct.part_vals) + for (String _iter1215 : struct.part_vals) { - oprot.writeString(_iter1205); + oprot.writeString(_iter1215); } oprot.writeListEnd(); } @@ -102209,9 +102734,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1206 : struct.part_vals) + for (String _iter1216 : struct.part_vals) { - oprot.writeString(_iter1206); + oprot.writeString(_iter1216); } } } @@ -102231,13 +102756,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1207.size); - String _elem1208; - for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) + org.apache.thrift.protocol.TList _list1217 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1217.size); + String _elem1218; + for (int _i1219 = 0; _i1219 < _list1217.size; ++_i1219) { - _elem1208 = iprot.readString(); - struct.part_vals.add(_elem1208); + _elem1218 = iprot.readString(); + struct.part_vals.add(_elem1218); } } 
struct.setPart_valsIsSet(true); @@ -103455,15 +103980,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1210 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1210.size); - String _key1211; - String _val1212; - for (int _i1213 = 0; _i1213 < _map1210.size; ++_i1213) + org.apache.thrift.protocol.TMap _map1220 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1220.size); + String _key1221; + String _val1222; + for (int _i1223 = 0; _i1223 < _map1220.size; ++_i1223) { - _key1211 = iprot.readString(); - _val1212 = iprot.readString(); - struct.partitionSpecs.put(_key1211, _val1212); + _key1221 = iprot.readString(); + _val1222 = iprot.readString(); + struct.partitionSpecs.put(_key1221, _val1222); } iprot.readMapEnd(); } @@ -103521,10 +104046,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1214 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1224 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1214.getKey()); - oprot.writeString(_iter1214.getValue()); + oprot.writeString(_iter1224.getKey()); + oprot.writeString(_iter1224.getValue()); } oprot.writeMapEnd(); } @@ -103587,10 +104112,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1215 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1225 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1215.getKey()); - oprot.writeString(_iter1215.getValue()); + 
oprot.writeString(_iter1225.getKey()); + oprot.writeString(_iter1225.getValue()); } } } @@ -103614,15 +104139,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1216 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1216.size); - String _key1217; - String _val1218; - for (int _i1219 = 0; _i1219 < _map1216.size; ++_i1219) + org.apache.thrift.protocol.TMap _map1226 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1226.size); + String _key1227; + String _val1228; + for (int _i1229 = 0; _i1229 < _map1226.size; ++_i1229) { - _key1217 = iprot.readString(); - _val1218 = iprot.readString(); - struct.partitionSpecs.put(_key1217, _val1218); + _key1227 = iprot.readString(); + _val1228 = iprot.readString(); + struct.partitionSpecs.put(_key1227, _val1228); } } struct.setPartitionSpecsIsSet(true); @@ -105068,15 +105593,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1220 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1220.size); - String _key1221; - String _val1222; - for (int _i1223 = 0; _i1223 < _map1220.size; ++_i1223) + org.apache.thrift.protocol.TMap _map1230 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1230.size); + String _key1231; + String _val1232; + for (int _i1233 = 0; _i1233 < _map1230.size; ++_i1233) { - _key1221 = iprot.readString(); - _val1222 = iprot.readString(); - struct.partitionSpecs.put(_key1221, _val1222); + _key1231 = iprot.readString(); + _val1232 = 
iprot.readString(); + struct.partitionSpecs.put(_key1231, _val1232); } iprot.readMapEnd(); } @@ -105134,10 +105659,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1224 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1234 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1224.getKey()); - oprot.writeString(_iter1224.getValue()); + oprot.writeString(_iter1234.getKey()); + oprot.writeString(_iter1234.getValue()); } oprot.writeMapEnd(); } @@ -105200,10 +105725,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1225 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1235 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1225.getKey()); - oprot.writeString(_iter1225.getValue()); + oprot.writeString(_iter1235.getKey()); + oprot.writeString(_iter1235.getValue()); } } } @@ -105227,15 +105752,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1226 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1226.size); - String _key1227; - String _val1228; - for (int _i1229 = 0; _i1229 < _map1226.size; ++_i1229) + org.apache.thrift.protocol.TMap _map1236 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1236.size); + String 
_key1237; + String _val1238; + for (int _i1239 = 0; _i1239 < _map1236.size; ++_i1239) { - _key1227 = iprot.readString(); - _val1228 = iprot.readString(); - struct.partitionSpecs.put(_key1227, _val1228); + _key1237 = iprot.readString(); + _val1238 = iprot.readString(); + struct.partitionSpecs.put(_key1237, _val1238); } } struct.setPartitionSpecsIsSet(true); @@ -105900,14 +106425,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1230 = iprot.readListBegin(); - struct.success = new ArrayList(_list1230.size); - Partition _elem1231; - for (int _i1232 = 0; _i1232 < _list1230.size; ++_i1232) + org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); + struct.success = new ArrayList(_list1240.size); + Partition _elem1241; + for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) { - _elem1231 = new Partition(); - _elem1231.read(iprot); - struct.success.add(_elem1231); + _elem1241 = new Partition(); + _elem1241.read(iprot); + struct.success.add(_elem1241); } iprot.readListEnd(); } @@ -105969,9 +106494,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1233 : struct.success) + for (Partition _iter1243 : struct.success) { - _iter1233.write(oprot); + _iter1243.write(oprot); } oprot.writeListEnd(); } @@ -106034,9 +106559,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1234 : struct.success) + for (Partition _iter1244 : struct.success) { - _iter1234.write(oprot); + _iter1244.write(oprot); } } } @@ -106060,14 +106585,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1235 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1235.size); - Partition _elem1236; - for (int _i1237 = 0; _i1237 < _list1235.size; ++_i1237) + org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1245.size); + Partition _elem1246; + for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) { - _elem1236 = new Partition(); - _elem1236.read(iprot); - struct.success.add(_elem1236); + _elem1246 = new Partition(); + _elem1246.read(iprot); + struct.success.add(_elem1246); } } struct.setSuccessIsSet(true); @@ -106766,13 +107291,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1238 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1238.size); - String _elem1239; - for (int _i1240 = 0; _i1240 < _list1238.size; ++_i1240) + org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1248.size); + String _elem1249; + for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) { - _elem1239 = iprot.readString(); - struct.part_vals.add(_elem1239); + _elem1249 = iprot.readString(); + struct.part_vals.add(_elem1249); } iprot.readListEnd(); } @@ -106792,13 +107317,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1241 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1241.size); - String _elem1242; 
- for (int _i1243 = 0; _i1243 < _list1241.size; ++_i1243) + org.apache.thrift.protocol.TList _list1251 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1251.size); + String _elem1252; + for (int _i1253 = 0; _i1253 < _list1251.size; ++_i1253) { - _elem1242 = iprot.readString(); - struct.group_names.add(_elem1242); + _elem1252 = iprot.readString(); + struct.group_names.add(_elem1252); } iprot.readListEnd(); } @@ -106834,9 +107359,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1244 : struct.part_vals) + for (String _iter1254 : struct.part_vals) { - oprot.writeString(_iter1244); + oprot.writeString(_iter1254); } oprot.writeListEnd(); } @@ -106851,9 +107376,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1245 : struct.group_names) + for (String _iter1255 : struct.group_names) { - oprot.writeString(_iter1245); + oprot.writeString(_iter1255); } oprot.writeListEnd(); } @@ -106902,9 +107427,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1246 : struct.part_vals) + for (String _iter1256 : struct.part_vals) { - oprot.writeString(_iter1246); + oprot.writeString(_iter1256); } } } @@ -106914,9 +107439,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1247 : struct.group_names) + for (String _iter1257 : struct.group_names) { - 
oprot.writeString(_iter1247); + oprot.writeString(_iter1257); } } } @@ -106936,13 +107461,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1248 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1248.size); - String _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1258 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1258.size); + String _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1249 = iprot.readString(); - struct.part_vals.add(_elem1249); + _elem1259 = iprot.readString(); + struct.part_vals.add(_elem1259); } } struct.setPart_valsIsSet(true); @@ -106953,13 +107478,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1251.size); - String _elem1252; - for (int _i1253 = 0; _i1253 < _list1251.size; ++_i1253) + org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1261.size); + String _elem1262; + for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) { - _elem1252 = iprot.readString(); - struct.group_names.add(_elem1252); + _elem1262 = iprot.readString(); + struct.group_names.add(_elem1262); } } struct.setGroup_namesIsSet(true); @@ -109728,14 +110253,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1254 = iprot.readListBegin(); - struct.success = new ArrayList(_list1254.size); - Partition _elem1255; - for (int _i1256 = 0; _i1256 < _list1254.size; ++_i1256) + org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); + struct.success = new ArrayList(_list1264.size); + Partition _elem1265; + for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) { - _elem1255 = new Partition(); - _elem1255.read(iprot); - struct.success.add(_elem1255); + _elem1265 = new Partition(); + _elem1265.read(iprot); + struct.success.add(_elem1265); } iprot.readListEnd(); } @@ -109779,9 +110304,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1257 : struct.success) + for (Partition _iter1267 : struct.success) { - _iter1257.write(oprot); + _iter1267.write(oprot); } oprot.writeListEnd(); } @@ -109828,9 +110353,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1258 : struct.success) + for (Partition _iter1268 : struct.success) { - _iter1258.write(oprot); + _iter1268.write(oprot); } } } @@ -109848,14 +110373,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1259 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1259.size); - Partition _elem1260; - for (int _i1261 = 0; _i1261 < _list1259.size; ++_i1261) + org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new 
ArrayList(_list1269.size); + Partition _elem1270; + for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) { - _elem1260 = new Partition(); - _elem1260.read(iprot); - struct.success.add(_elem1260); + _elem1270 = new Partition(); + _elem1270.read(iprot); + struct.success.add(_elem1270); } } struct.setSuccessIsSet(true); @@ -110545,13 +111070,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1262 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1262.size); - String _elem1263; - for (int _i1264 = 0; _i1264 < _list1262.size; ++_i1264) + org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1272.size); + String _elem1273; + for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) { - _elem1263 = iprot.readString(); - struct.group_names.add(_elem1263); + _elem1273 = iprot.readString(); + struct.group_names.add(_elem1273); } iprot.readListEnd(); } @@ -110595,9 +111120,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1265 : struct.group_names) + for (String _iter1275 : struct.group_names) { - oprot.writeString(_iter1265); + oprot.writeString(_iter1275); } oprot.writeListEnd(); } @@ -110652,9 +111177,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1266 : struct.group_names) + for (String _iter1276 : struct.group_names) { - oprot.writeString(_iter1266); + oprot.writeString(_iter1276); } } } @@ -110682,13 +111207,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1267 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1267.size); - String _elem1268; - for (int _i1269 = 0; _i1269 < _list1267.size; ++_i1269) + org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1277.size); + String _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1268 = iprot.readString(); - struct.group_names.add(_elem1268); + _elem1278 = iprot.readString(); + struct.group_names.add(_elem1278); } } struct.setGroup_namesIsSet(true); @@ -111175,14 +111700,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1270 = iprot.readListBegin(); - struct.success = new ArrayList(_list1270.size); - Partition _elem1271; - for (int _i1272 = 0; _i1272 < _list1270.size; ++_i1272) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.success = new ArrayList(_list1280.size); + Partition _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1271 = new Partition(); - _elem1271.read(iprot); - struct.success.add(_elem1271); + _elem1281 = new Partition(); + _elem1281.read(iprot); + struct.success.add(_elem1281); } iprot.readListEnd(); } @@ -111226,9 +111751,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1273 : struct.success) + for (Partition _iter1283 : struct.success) { - _iter1273.write(oprot); + 
_iter1283.write(oprot); } oprot.writeListEnd(); } @@ -111275,9 +111800,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1274 : struct.success) + for (Partition _iter1284 : struct.success) { - _iter1274.write(oprot); + _iter1284.write(oprot); } } } @@ -111295,14 +111820,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1275 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1275.size); - Partition _elem1276; - for (int _i1277 = 0; _i1277 < _list1275.size; ++_i1277) + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1285.size); + Partition _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) { - _elem1276 = new Partition(); - _elem1276.read(iprot); - struct.success.add(_elem1276); + _elem1286 = new Partition(); + _elem1286.read(iprot); + struct.success.add(_elem1286); } } struct.setSuccessIsSet(true); @@ -112365,14 +112890,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1278 = iprot.readListBegin(); - struct.success = new ArrayList(_list1278.size); - PartitionSpec _elem1279; - for (int _i1280 = 0; _i1280 < _list1278.size; ++_i1280) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.success = new ArrayList(_list1288.size); + PartitionSpec _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1279 = new PartitionSpec(); - _elem1279.read(iprot); - 
struct.success.add(_elem1279); + _elem1289 = new PartitionSpec(); + _elem1289.read(iprot); + struct.success.add(_elem1289); } iprot.readListEnd(); } @@ -112416,9 +112941,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1281 : struct.success) + for (PartitionSpec _iter1291 : struct.success) { - _iter1281.write(oprot); + _iter1291.write(oprot); } oprot.writeListEnd(); } @@ -112465,9 +112990,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1282 : struct.success) + for (PartitionSpec _iter1292 : struct.success) { - _iter1282.write(oprot); + _iter1292.write(oprot); } } } @@ -112485,14 +113010,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1283 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1283.size); - PartitionSpec _elem1284; - for (int _i1285 = 0; _i1285 < _list1283.size; ++_i1285) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1293.size); + PartitionSpec _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1284 = new PartitionSpec(); - _elem1284.read(iprot); - struct.success.add(_elem1284); + _elem1294 = new PartitionSpec(); + _elem1294.read(iprot); + struct.success.add(_elem1294); } } struct.setSuccessIsSet(true); @@ -113552,13 +114077,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names 
case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1286 = iprot.readListBegin(); - struct.success = new ArrayList(_list1286.size); - String _elem1287; - for (int _i1288 = 0; _i1288 < _list1286.size; ++_i1288) + org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); + struct.success = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1287 = iprot.readString(); - struct.success.add(_elem1287); + _elem1297 = iprot.readString(); + struct.success.add(_elem1297); } iprot.readListEnd(); } @@ -113602,9 +114127,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1289 : struct.success) + for (String _iter1299 : struct.success) { - oprot.writeString(_iter1289); + oprot.writeString(_iter1299); } oprot.writeListEnd(); } @@ -113651,9 +114176,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1290 : struct.success) + for (String _iter1300 : struct.success) { - oprot.writeString(_iter1290); + oprot.writeString(_iter1300); } } } @@ -113671,13 +114196,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1291 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1291.size); - String _elem1292; - for (int _i1293 = 0; _i1293 < _list1291.size; ++_i1293) + org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); + struct.success = new ArrayList(_list1301.size); + String _elem1302; + for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) { - _elem1292 = iprot.readString(); - struct.success.add(_elem1292); + _elem1302 = iprot.readString(); + struct.success.add(_elem1302); } } struct.setSuccessIsSet(true); @@ -115208,13 +115733,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1294 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1294.size); - String _elem1295; - for (int _i1296 = 0; _i1296 < _list1294.size; ++_i1296) + org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1304.size); + String _elem1305; + for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) { - _elem1295 = iprot.readString(); - struct.part_vals.add(_elem1295); + _elem1305 = iprot.readString(); + struct.part_vals.add(_elem1305); } iprot.readListEnd(); } @@ -115258,9 +115783,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1297 : struct.part_vals) + for (String _iter1307 : struct.part_vals) { - oprot.writeString(_iter1297); + oprot.writeString(_iter1307); } oprot.writeListEnd(); } @@ -115309,9 +115834,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1298 : struct.part_vals) + for (String _iter1308 : struct.part_vals) { - oprot.writeString(_iter1298); + oprot.writeString(_iter1308); } } } @@ -115334,13 +115859,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if 
(incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1299 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1299.size); - String _elem1300; - for (int _i1301 = 0; _i1301 < _list1299.size; ++_i1301) + org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1309.size); + String _elem1310; + for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) { - _elem1300 = iprot.readString(); - struct.part_vals.add(_elem1300); + _elem1310 = iprot.readString(); + struct.part_vals.add(_elem1310); } } struct.setPart_valsIsSet(true); @@ -115831,14 +116356,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1302 = iprot.readListBegin(); - struct.success = new ArrayList(_list1302.size); - Partition _elem1303; - for (int _i1304 = 0; _i1304 < _list1302.size; ++_i1304) + org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); + struct.success = new ArrayList(_list1312.size); + Partition _elem1313; + for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) { - _elem1303 = new Partition(); - _elem1303.read(iprot); - struct.success.add(_elem1303); + _elem1313 = new Partition(); + _elem1313.read(iprot); + struct.success.add(_elem1313); } iprot.readListEnd(); } @@ -115882,9 +116407,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1305 : struct.success) + for (Partition _iter1315 : struct.success) { - _iter1305.write(oprot); + _iter1315.write(oprot); } oprot.writeListEnd(); } @@ 
-115931,9 +116456,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1306 : struct.success) + for (Partition _iter1316 : struct.success) { - _iter1306.write(oprot); + _iter1316.write(oprot); } } } @@ -115951,14 +116476,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1307.size); - Partition _elem1308; - for (int _i1309 = 0; _i1309 < _list1307.size; ++_i1309) + org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1317.size); + Partition _elem1318; + for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) { - _elem1308 = new Partition(); - _elem1308.read(iprot); - struct.success.add(_elem1308); + _elem1318 = new Partition(); + _elem1318.read(iprot); + struct.success.add(_elem1318); } } struct.setSuccessIsSet(true); @@ -116730,13 +117255,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1310 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1310.size); - String _elem1311; - for (int _i1312 = 0; _i1312 < _list1310.size; ++_i1312) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1320.size); + String _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1311 = iprot.readString(); - struct.part_vals.add(_elem1311); + _elem1321 = iprot.readString(); + struct.part_vals.add(_elem1321); } 
iprot.readListEnd(); } @@ -116764,13 +117289,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1313 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1313.size); - String _elem1314; - for (int _i1315 = 0; _i1315 < _list1313.size; ++_i1315) + org.apache.thrift.protocol.TList _list1323 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1323.size); + String _elem1324; + for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) { - _elem1314 = iprot.readString(); - struct.group_names.add(_elem1314); + _elem1324 = iprot.readString(); + struct.group_names.add(_elem1324); } iprot.readListEnd(); } @@ -116806,9 +117331,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1316 : struct.part_vals) + for (String _iter1326 : struct.part_vals) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1326); } oprot.writeListEnd(); } @@ -116826,9 +117351,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1317 : struct.group_names) + for (String _iter1327 : struct.group_names) { - oprot.writeString(_iter1317); + oprot.writeString(_iter1327); } oprot.writeListEnd(); } @@ -116880,9 +117405,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1318 : struct.part_vals) + for (String _iter1328 : struct.part_vals) { - 
oprot.writeString(_iter1318); + oprot.writeString(_iter1328); } } } @@ -116895,9 +117420,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1319 : struct.group_names) + for (String _iter1329 : struct.group_names) { - oprot.writeString(_iter1319); + oprot.writeString(_iter1329); } } } @@ -116917,13 +117442,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1320 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList _list1330 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1321 = iprot.readString(); - struct.part_vals.add(_elem1321); + _elem1331 = iprot.readString(); + struct.part_vals.add(_elem1331); } } struct.setPart_valsIsSet(true); @@ -116938,13 +117463,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1323 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1323.size); - String _elem1324; - for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) + org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1333.size); + String _elem1334; + for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) { - _elem1324 = iprot.readString(); - 
struct.group_names.add(_elem1324); + _elem1334 = iprot.readString(); + struct.group_names.add(_elem1334); } } struct.setGroup_namesIsSet(true); @@ -117431,14 +117956,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1326 = iprot.readListBegin(); - struct.success = new ArrayList(_list1326.size); - Partition _elem1327; - for (int _i1328 = 0; _i1328 < _list1326.size; ++_i1328) + org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); + struct.success = new ArrayList(_list1336.size); + Partition _elem1337; + for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) { - _elem1327 = new Partition(); - _elem1327.read(iprot); - struct.success.add(_elem1327); + _elem1337 = new Partition(); + _elem1337.read(iprot); + struct.success.add(_elem1337); } iprot.readListEnd(); } @@ -117482,9 +118007,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1329 : struct.success) + for (Partition _iter1339 : struct.success) { - _iter1329.write(oprot); + _iter1339.write(oprot); } oprot.writeListEnd(); } @@ -117531,9 +118056,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1330 : struct.success) + for (Partition _iter1340 : struct.success) { - _iter1330.write(oprot); + _iter1340.write(oprot); } } } @@ -117551,14 +118076,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1331 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1331.size); - Partition _elem1332; - for (int _i1333 = 0; _i1333 < _list1331.size; ++_i1333) + org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1341.size); + Partition _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1332 = new Partition(); - _elem1332.read(iprot); - struct.success.add(_elem1332); + _elem1342 = new Partition(); + _elem1342.read(iprot); + struct.success.add(_elem1342); } } struct.setSuccessIsSet(true); @@ -118151,13 +118676,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1334 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1334.size); - String _elem1335; - for (int _i1336 = 0; _i1336 < _list1334.size; ++_i1336) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1344.size); + String _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1335 = iprot.readString(); - struct.part_vals.add(_elem1335); + _elem1345 = iprot.readString(); + struct.part_vals.add(_elem1345); } iprot.readListEnd(); } @@ -118201,9 +118726,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1337 : struct.part_vals) + for (String _iter1347 : struct.part_vals) { - oprot.writeString(_iter1337); + oprot.writeString(_iter1347); } oprot.writeListEnd(); } @@ -118252,9 +118777,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1338 : struct.part_vals) + for (String _iter1348 : struct.part_vals) { - oprot.writeString(_iter1338); + oprot.writeString(_iter1348); } } } @@ -118277,13 +118802,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1339 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1339.size); - String _elem1340; - for (int _i1341 = 0; _i1341 < _list1339.size; ++_i1341) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1349.size); + String _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1340 = iprot.readString(); - struct.part_vals.add(_elem1340); + _elem1350 = iprot.readString(); + struct.part_vals.add(_elem1350); } } struct.setPart_valsIsSet(true); @@ -118771,13 +119296,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1342 = iprot.readListBegin(); - struct.success = new ArrayList(_list1342.size); - String _elem1343; - for (int _i1344 = 0; _i1344 < _list1342.size; ++_i1344) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.success = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1343 = iprot.readString(); - struct.success.add(_elem1343); + _elem1353 = iprot.readString(); + struct.success.add(_elem1353); } iprot.readListEnd(); } @@ -118821,9 +119346,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1345 : struct.success) + for (String _iter1355 : struct.success) { - oprot.writeString(_iter1345); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -118870,9 +119395,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1346 : struct.success) + for (String _iter1356 : struct.success) { - oprot.writeString(_iter1346); + oprot.writeString(_iter1356); } } } @@ -118890,13 +119415,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1347 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1347.size); - String _elem1348; - for (int _i1349 = 0; _i1349 < _list1347.size; ++_i1349) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1348 = iprot.readString(); - struct.success.add(_elem1348); + _elem1358 = iprot.readString(); + struct.success.add(_elem1358); } } struct.setSuccessIsSet(true); @@ -120063,14 +120588,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1350 = iprot.readListBegin(); - struct.success = new ArrayList(_list1350.size); - Partition _elem1351; - for (int _i1352 = 0; _i1352 < _list1350.size; ++_i1352) + 
org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); + struct.success = new ArrayList(_list1360.size); + Partition _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1351 = new Partition(); - _elem1351.read(iprot); - struct.success.add(_elem1351); + _elem1361 = new Partition(); + _elem1361.read(iprot); + struct.success.add(_elem1361); } iprot.readListEnd(); } @@ -120114,9 +120639,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1353 : struct.success) + for (Partition _iter1363 : struct.success) { - _iter1353.write(oprot); + _iter1363.write(oprot); } oprot.writeListEnd(); } @@ -120163,9 +120688,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1354 : struct.success) + for (Partition _iter1364 : struct.success) { - _iter1354.write(oprot); + _iter1364.write(oprot); } } } @@ -120183,14 +120708,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1355 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1355.size); - Partition _elem1356; - for (int _i1357 = 0; _i1357 < _list1355.size; ++_i1357) + org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1365.size); + Partition _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1356 = new Partition(); - _elem1356.read(iprot); - struct.success.add(_elem1356); + 
_elem1366 = new Partition(); + _elem1366.read(iprot); + struct.success.add(_elem1366); } } struct.setSuccessIsSet(true); @@ -121357,14 +121882,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); - struct.success = new ArrayList(_list1358.size); - PartitionSpec _elem1359; - for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + PartitionSpec _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1359 = new PartitionSpec(); - _elem1359.read(iprot); - struct.success.add(_elem1359); + _elem1369 = new PartitionSpec(); + _elem1369.read(iprot); + struct.success.add(_elem1369); } iprot.readListEnd(); } @@ -121408,9 +121933,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1361 : struct.success) + for (PartitionSpec _iter1371 : struct.success) { - _iter1361.write(oprot); + _iter1371.write(oprot); } oprot.writeListEnd(); } @@ -121457,9 +121982,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1362 : struct.success) + for (PartitionSpec _iter1372 : struct.success) { - _iter1362.write(oprot); + _iter1372.write(oprot); } } } @@ -121477,14 +122002,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1363 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1363.size); - PartitionSpec _elem1364; - for (int _i1365 = 0; _i1365 < _list1363.size; ++_i1365) + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + PartitionSpec _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1364 = new PartitionSpec(); - _elem1364.read(iprot); - struct.success.add(_elem1364); + _elem1374 = new PartitionSpec(); + _elem1374.read(iprot); + struct.success.add(_elem1374); } } struct.setSuccessIsSet(true); @@ -124068,13 +124593,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); - struct.names = new ArrayList(_list1366.size); - String _elem1367; - for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) + org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); + struct.names = new ArrayList(_list1376.size); + String _elem1377; + for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1367 = iprot.readString(); - struct.names.add(_elem1367); + _elem1377 = iprot.readString(); + struct.names.add(_elem1377); } iprot.readListEnd(); } @@ -124110,9 +124635,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1369 : struct.names) + for (String _iter1379 : struct.names) { - oprot.writeString(_iter1369); + oprot.writeString(_iter1379); } oprot.writeListEnd(); } @@ -124155,9 +124680,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1370 : struct.names) + for (String _iter1380 : struct.names) { - oprot.writeString(_iter1370); + oprot.writeString(_iter1380); } } } @@ -124177,13 +124702,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1371.size); - String _elem1372; - for (int _i1373 = 0; _i1373 < _list1371.size; ++_i1373) + org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1381.size); + String _elem1382; + for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) { - _elem1372 = iprot.readString(); - struct.names.add(_elem1372); + _elem1382 = iprot.readString(); + struct.names.add(_elem1382); } } struct.setNamesIsSet(true); @@ -124670,14 +125195,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); - struct.success = new ArrayList(_list1374.size); - Partition _elem1375; - for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) + org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); + struct.success = new ArrayList(_list1384.size); + Partition _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1375 = new Partition(); - _elem1375.read(iprot); - struct.success.add(_elem1375); + _elem1385 = new Partition(); + _elem1385.read(iprot); + struct.success.add(_elem1385); } iprot.readListEnd(); } @@ -124721,9 +125246,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1377 : struct.success) + for (Partition _iter1387 : struct.success) { - _iter1377.write(oprot); + _iter1387.write(oprot); } oprot.writeListEnd(); } @@ -124770,9 +125295,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1378 : struct.success) + for (Partition _iter1388 : struct.success) { - _iter1378.write(oprot); + _iter1388.write(oprot); } } } @@ -124790,14 +125315,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1379.size); - Partition _elem1380; - for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) + org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1389.size); + Partition _elem1390; + for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) { - _elem1380 = new Partition(); - _elem1380.read(iprot); - struct.success.add(_elem1380); + _elem1390 = new Partition(); + _elem1390.read(iprot); + struct.success.add(_elem1390); } } struct.setSuccessIsSet(true); @@ -126347,14 +126872,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1382.size); - Partition _elem1383; - for (int _i1384 = 0; _i1384 < _list1382.size; 
++_i1384) + org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1392.size); + Partition _elem1393; + for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) { - _elem1383 = new Partition(); - _elem1383.read(iprot); - struct.new_parts.add(_elem1383); + _elem1393 = new Partition(); + _elem1393.read(iprot); + struct.new_parts.add(_elem1393); } iprot.readListEnd(); } @@ -126390,9 +126915,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1385 : struct.new_parts) + for (Partition _iter1395 : struct.new_parts) { - _iter1385.write(oprot); + _iter1395.write(oprot); } oprot.writeListEnd(); } @@ -126435,9 +126960,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1386 : struct.new_parts) + for (Partition _iter1396 : struct.new_parts) { - _iter1386.write(oprot); + _iter1396.write(oprot); } } } @@ -126457,14 +126982,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1387.size); - Partition _elem1388; - for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) + org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1397.size); + Partition _elem1398; + for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) { - _elem1388 = new Partition(); - _elem1388.read(iprot); - struct.new_parts.add(_elem1388); + 
_elem1398 = new Partition(); + _elem1398.read(iprot); + struct.new_parts.add(_elem1398); } } struct.setNew_partsIsSet(true); @@ -127517,14 +128042,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1390.size); - Partition _elem1391; - for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) + org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1400.size); + Partition _elem1401; + for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) { - _elem1391 = new Partition(); - _elem1391.read(iprot); - struct.new_parts.add(_elem1391); + _elem1401 = new Partition(); + _elem1401.read(iprot); + struct.new_parts.add(_elem1401); } iprot.readListEnd(); } @@ -127569,9 +128094,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1393 : struct.new_parts) + for (Partition _iter1403 : struct.new_parts) { - _iter1393.write(oprot); + _iter1403.write(oprot); } oprot.writeListEnd(); } @@ -127622,9 +128147,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1394 : struct.new_parts) + for (Partition _iter1404 : struct.new_parts) { - _iter1394.write(oprot); + _iter1404.write(oprot); } } } @@ -127647,14 +128172,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.new_parts = new ArrayList(_list1395.size); - Partition _elem1396; - for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) + org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1405.size); + Partition _elem1406; + for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) { - _elem1396 = new Partition(); - _elem1396.read(iprot); - struct.new_parts.add(_elem1396); + _elem1406 = new Partition(); + _elem1406.read(iprot); + struct.new_parts.add(_elem1406); } } struct.setNew_partsIsSet(true); @@ -130793,13 +131318,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1398.size); - String _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1408.size); + String _elem1409; + for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) { - _elem1399 = iprot.readString(); - struct.part_vals.add(_elem1399); + _elem1409 = iprot.readString(); + struct.part_vals.add(_elem1409); } iprot.readListEnd(); } @@ -130844,9 +131369,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1401 : struct.part_vals) + for (String _iter1411 : struct.part_vals) { - oprot.writeString(_iter1401); + oprot.writeString(_iter1411); } oprot.writeListEnd(); } @@ -130897,9 +131422,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if 
(struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1402 : struct.part_vals) + for (String _iter1412 : struct.part_vals) { - oprot.writeString(_iter1402); + oprot.writeString(_iter1412); } } } @@ -130922,13 +131447,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1403.size); - String _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1413.size); + String _elem1414; + for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) { - _elem1404 = iprot.readString(); - struct.part_vals.add(_elem1404); + _elem1414 = iprot.readString(); + struct.part_vals.add(_elem1414); } } struct.setPart_valsIsSet(true); @@ -132740,13 +133265,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1406.size); - String _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1416.size); + String _elem1417; + for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) { - _elem1407 = iprot.readString(); - struct.part_vals.add(_elem1407); + _elem1417 = iprot.readString(); + struct.part_vals.add(_elem1417); } iprot.readListEnd(); } @@ -132780,9 +133305,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has 
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1409 : struct.part_vals) + for (String _iter1419 : struct.part_vals) { - oprot.writeString(_iter1409); + oprot.writeString(_iter1419); } oprot.writeListEnd(); } @@ -132819,9 +133344,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1410 : struct.part_vals) + for (String _iter1420 : struct.part_vals) { - oprot.writeString(_iter1410); + oprot.writeString(_iter1420); } } } @@ -132836,13 +133361,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1411.size); - String _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1421.size); + String _elem1422; + for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) { - _elem1412 = iprot.readString(); - struct.part_vals.add(_elem1412); + _elem1422 = iprot.readString(); + struct.part_vals.add(_elem1422); } } struct.setPart_valsIsSet(true); @@ -134997,13 +135522,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.success = new ArrayList(_list1414.size); - String _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + 
org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); + struct.success = new ArrayList(_list1424.size); + String _elem1425; + for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) { - _elem1415 = iprot.readString(); - struct.success.add(_elem1415); + _elem1425 = iprot.readString(); + struct.success.add(_elem1425); } iprot.readListEnd(); } @@ -135038,9 +135563,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1417 : struct.success) + for (String _iter1427 : struct.success) { - oprot.writeString(_iter1417); + oprot.writeString(_iter1427); } oprot.writeListEnd(); } @@ -135079,9 +135604,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1418 : struct.success) + for (String _iter1428 : struct.success) { - oprot.writeString(_iter1418); + oprot.writeString(_iter1428); } } } @@ -135096,13 +135621,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1419.size); - String _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1429.size); + String _elem1430; + for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) { - _elem1420 = iprot.readString(); - struct.success.add(_elem1420); + _elem1430 = iprot.readString(); + struct.success.add(_elem1430); } } 
struct.setSuccessIsSet(true); @@ -135865,15 +136390,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1422 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1422.size); - String _key1423; - String _val1424; - for (int _i1425 = 0; _i1425 < _map1422.size; ++_i1425) + org.apache.thrift.protocol.TMap _map1432 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1432.size); + String _key1433; + String _val1434; + for (int _i1435 = 0; _i1435 < _map1432.size; ++_i1435) { - _key1423 = iprot.readString(); - _val1424 = iprot.readString(); - struct.success.put(_key1423, _val1424); + _key1433 = iprot.readString(); + _val1434 = iprot.readString(); + struct.success.put(_key1433, _val1434); } iprot.readMapEnd(); } @@ -135908,10 +136433,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1426 : struct.success.entrySet()) + for (Map.Entry _iter1436 : struct.success.entrySet()) { - oprot.writeString(_iter1426.getKey()); - oprot.writeString(_iter1426.getValue()); + oprot.writeString(_iter1436.getKey()); + oprot.writeString(_iter1436.getValue()); } oprot.writeMapEnd(); } @@ -135950,10 +136475,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1427 : struct.success.entrySet()) + for (Map.Entry _iter1437 : struct.success.entrySet()) { - oprot.writeString(_iter1427.getKey()); - oprot.writeString(_iter1427.getValue()); + oprot.writeString(_iter1437.getKey()); + oprot.writeString(_iter1437.getValue()); } } } @@ -135968,15 
+136493,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1428 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1428.size); - String _key1429; - String _val1430; - for (int _i1431 = 0; _i1431 < _map1428.size; ++_i1431) + org.apache.thrift.protocol.TMap _map1438 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1438.size); + String _key1439; + String _val1440; + for (int _i1441 = 0; _i1441 < _map1438.size; ++_i1441) { - _key1429 = iprot.readString(); - _val1430 = iprot.readString(); - struct.success.put(_key1429, _val1430); + _key1439 = iprot.readString(); + _val1440 = iprot.readString(); + struct.success.put(_key1439, _val1440); } } struct.setSuccessIsSet(true); @@ -136571,15 +137096,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1432 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1432.size); - String _key1433; - String _val1434; - for (int _i1435 = 0; _i1435 < _map1432.size; ++_i1435) + org.apache.thrift.protocol.TMap _map1442 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1442.size); + String _key1443; + String _val1444; + for (int _i1445 = 0; _i1445 < _map1442.size; ++_i1445) { - _key1433 = iprot.readString(); - _val1434 = iprot.readString(); - struct.part_vals.put(_key1433, _val1434); + _key1443 = iprot.readString(); + _val1444 = iprot.readString(); + struct.part_vals.put(_key1443, _val1444); } iprot.readMapEnd(); } @@ -136623,10 +137148,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1436 : struct.part_vals.entrySet()) + for (Map.Entry _iter1446 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1436.getKey()); - oprot.writeString(_iter1436.getValue()); + oprot.writeString(_iter1446.getKey()); + oprot.writeString(_iter1446.getValue()); } oprot.writeMapEnd(); } @@ -136677,10 +137202,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1437 : struct.part_vals.entrySet()) + for (Map.Entry _iter1447 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1437.getKey()); - oprot.writeString(_iter1437.getValue()); + oprot.writeString(_iter1447.getKey()); + oprot.writeString(_iter1447.getValue()); } } } @@ -136703,15 +137228,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1438 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1438.size); - String _key1439; - String _val1440; - for (int _i1441 = 0; _i1441 < _map1438.size; ++_i1441) + org.apache.thrift.protocol.TMap _map1448 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1448.size); + String _key1449; + String _val1450; + for (int _i1451 = 0; _i1451 < _map1448.size; ++_i1451) { - _key1439 = iprot.readString(); - _val1440 = iprot.readString(); - struct.part_vals.put(_key1439, _val1440); + _key1449 = 
iprot.readString(); + _val1450 = iprot.readString(); + struct.part_vals.put(_key1449, _val1450); } } struct.setPart_valsIsSet(true); @@ -138195,15 +138720,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1442 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1442.size); - String _key1443; - String _val1444; - for (int _i1445 = 0; _i1445 < _map1442.size; ++_i1445) + org.apache.thrift.protocol.TMap _map1452 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1452.size); + String _key1453; + String _val1454; + for (int _i1455 = 0; _i1455 < _map1452.size; ++_i1455) { - _key1443 = iprot.readString(); - _val1444 = iprot.readString(); - struct.part_vals.put(_key1443, _val1444); + _key1453 = iprot.readString(); + _val1454 = iprot.readString(); + struct.part_vals.put(_key1453, _val1454); } iprot.readMapEnd(); } @@ -138247,10 +138772,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1446 : struct.part_vals.entrySet()) + for (Map.Entry _iter1456 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1446.getKey()); - oprot.writeString(_iter1446.getValue()); + oprot.writeString(_iter1456.getKey()); + oprot.writeString(_iter1456.getValue()); } oprot.writeMapEnd(); } @@ -138301,10 +138826,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1447 : struct.part_vals.entrySet()) + for (Map.Entry _iter1457 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1447.getKey()); - 
oprot.writeString(_iter1447.getValue()); + oprot.writeString(_iter1457.getKey()); + oprot.writeString(_iter1457.getValue()); } } } @@ -138327,15 +138852,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1448 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1448.size); - String _key1449; - String _val1450; - for (int _i1451 = 0; _i1451 < _map1448.size; ++_i1451) + org.apache.thrift.protocol.TMap _map1458 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1458.size); + String _key1459; + String _val1460; + for (int _i1461 = 0; _i1461 < _map1458.size; ++_i1461) { - _key1449 = iprot.readString(); - _val1450 = iprot.readString(); - struct.part_vals.put(_key1449, _val1450); + _key1459 = iprot.readString(); + _val1460 = iprot.readString(); + struct.part_vals.put(_key1459, _val1460); } } struct.setPart_valsIsSet(true); @@ -145210,14 +145735,3454 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("update_table_column_statistics_args("); + StringBuilder sb = new StringBuilder("update_table_column_statistics_args("); + boolean first = true; + + sb.append("stats_obj:"); + if (this.stats_obj == null) { + sb.append("null"); + } else { + sb.append(this.stats_obj); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (stats_obj != null) { + stats_obj.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_table_column_statistics_argsStandardSchemeFactory implements SchemeFactory { + public update_table_column_statistics_argsStandardScheme getScheme() { + return new update_table_column_statistics_argsStandardScheme(); + } + } + + private static class update_table_column_statistics_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATS_OBJ + 
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.stats_obj = new ColumnStatistics(); + struct.stats_obj.read(iprot); + struct.setStats_objIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.stats_obj != null) { + oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); + struct.stats_obj.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class update_table_column_statistics_argsTupleSchemeFactory implements SchemeFactory { + public update_table_column_statistics_argsTupleScheme getScheme() { + return new update_table_column_statistics_argsTupleScheme(); + } + } + + private static class update_table_column_statistics_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetStats_obj()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetStats_obj()) { + struct.stats_obj.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.stats_obj = new ColumnStatistics(); + struct.stats_obj.read(iprot); + struct.setStats_objIsSet(true); + } + } + 
} + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_table_column_statistics_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_table_column_statistics_resultTupleSchemeFactory()); + } + + private boolean success; // required + private NoSuchObjectException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + private InvalidInputException o4; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_result.class, metaDataMap); + } + + public update_table_column_statistics_result() { + } + + public update_table_column_statistics_result( + boolean success, + NoSuchObjectException o1, + InvalidObjectException o2, + MetaException o3, + InvalidInputException o4) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + this.o4 = o4; + } + + /** + * Performs a deep copy on other. + */ + public update_table_column_statistics_result(update_table_column_statistics_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new InvalidInputException(other.o4); + } + } + + public update_table_column_statistics_result deepCopy() { + return new update_table_column_statistics_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.o1 = null; + this.o2 = null; + this.o3 = null; + this.o4 = null; + } + + public boolean isSuccess() { + return this.success; + } + + public void setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + } + + public void unsetSuccess() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + public NoSuchObjectException getO1() { + return this.o1; + } + 
+ public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public InvalidInputException getO4() { + return this.o4; + } + + public void setO4(InvalidInputException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + 
setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((InvalidInputException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return isSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + case O4: + return getO4(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_table_column_statistics_result) + return this.equals((update_table_column_statistics_result)that); + return false; + } + + public boolean equals(update_table_column_statistics_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if 
(this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + + return list.hashCode(); + } + + @Override + public int compareTo(update_table_column_statistics_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return 
lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_table_column_statistics_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + 
sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_table_column_statistics_resultStandardSchemeFactory implements SchemeFactory { + public update_table_column_statistics_resultStandardScheme getScheme() { + return new update_table_column_statistics_resultStandardScheme(); + } + } + + private static class update_table_column_statistics_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class update_table_column_statistics_resultTupleSchemeFactory implements SchemeFactory { + public update_table_column_statistics_resultTupleScheme getScheme() { + return new update_table_column_statistics_resultTupleScheme(); + } + } + + private static class update_table_column_statistics_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet 
optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(5); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_args"); + + private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new 
org.apache.thrift.protocol.TField("stats_obj", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_partition_column_statistics_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_column_statistics_argsTupleSchemeFactory()); + } + + private ColumnStatistics stats_obj; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STATS_OBJ((short)1, "stats_obj"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STATS_OBJ + return STATS_OBJ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("stats_obj", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_args.class, metaDataMap); + } + + public update_partition_column_statistics_args() { + } + + public update_partition_column_statistics_args( + ColumnStatistics stats_obj) + { + this(); + this.stats_obj = stats_obj; + } + + /** + * Performs a deep copy on other. 
+ */ + public update_partition_column_statistics_args(update_partition_column_statistics_args other) { + if (other.isSetStats_obj()) { + this.stats_obj = new ColumnStatistics(other.stats_obj); + } + } + + public update_partition_column_statistics_args deepCopy() { + return new update_partition_column_statistics_args(this); + } + + @Override + public void clear() { + this.stats_obj = null; + } + + public ColumnStatistics getStats_obj() { + return this.stats_obj; + } + + public void setStats_obj(ColumnStatistics stats_obj) { + this.stats_obj = stats_obj; + } + + public void unsetStats_obj() { + this.stats_obj = null; + } + + /** Returns true if field stats_obj is set (has been assigned a value) and false otherwise */ + public boolean isSetStats_obj() { + return this.stats_obj != null; + } + + public void setStats_objIsSet(boolean value) { + if (!value) { + this.stats_obj = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATS_OBJ: + if (value == null) { + unsetStats_obj(); + } else { + setStats_obj((ColumnStatistics)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATS_OBJ: + return getStats_obj(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATS_OBJ: + return isSetStats_obj(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_partition_column_statistics_args) + return this.equals((update_partition_column_statistics_args)that); + return false; + } + + public boolean equals(update_partition_column_statistics_args that) { + if (that == null) + return false; + + boolean this_present_stats_obj = true && 
this.isSetStats_obj(); + boolean that_present_stats_obj = true && that.isSetStats_obj(); + if (this_present_stats_obj || that_present_stats_obj) { + if (!(this_present_stats_obj && that_present_stats_obj)) + return false; + if (!this.stats_obj.equals(that.stats_obj)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_stats_obj = true && (isSetStats_obj()); + list.add(present_stats_obj); + if (present_stats_obj) + list.add(stats_obj); + + return list.hashCode(); + } + + @Override + public int compareTo(update_partition_column_statistics_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetStats_obj()).compareTo(other.isSetStats_obj()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStats_obj()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stats_obj, other.stats_obj); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_partition_column_statistics_args("); + boolean first = true; + + sb.append("stats_obj:"); + if (this.stats_obj == null) { + sb.append("null"); + } else { + sb.append(this.stats_obj); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields 
+ // check for sub-struct validity + if (stats_obj != null) { + stats_obj.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_partition_column_statistics_argsStandardSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_argsStandardScheme getScheme() { + return new update_partition_column_statistics_argsStandardScheme(); + } + } + + private static class update_partition_column_statistics_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATS_OBJ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.stats_obj = new ColumnStatistics(); + struct.stats_obj.read(iprot); + struct.setStats_objIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.stats_obj != null) { + oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); + struct.stats_obj.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class update_partition_column_statistics_argsTupleSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_argsTupleScheme getScheme() { + return new update_partition_column_statistics_argsTupleScheme(); + } + } + + private static class update_partition_column_statistics_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetStats_obj()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetStats_obj()) { + struct.stats_obj.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.stats_obj = new ColumnStatistics(); + struct.stats_obj.read(iprot); + struct.setStats_objIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("update_partition_column_statistics_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_partition_column_statistics_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_column_statistics_resultTupleSchemeFactory()); + } + + private boolean success; // required + private NoSuchObjectException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + private InvalidInputException o4; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_result.class, metaDataMap); + } + + public update_partition_column_statistics_result() { + } + + public update_partition_column_statistics_result( + boolean success, + NoSuchObjectException o1, + InvalidObjectException o2, + MetaException o3, + InvalidInputException o4) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + this.o4 = o4; + } + + /** + * Performs a deep copy on other. + */ + public update_partition_column_statistics_result(update_partition_column_statistics_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new InvalidInputException(other.o4); + } + } + + public update_partition_column_statistics_result deepCopy() { + return new update_partition_column_statistics_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.o1 = null; + this.o2 = null; + this.o3 = null; + this.o4 = null; + } + + public boolean isSuccess() { + return this.success; + } + + public void setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + } + + public void unsetSuccess() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + public NoSuchObjectException 
getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public InvalidInputException getO4() { + return this.o4; + } + + public void setO4(InvalidInputException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { 
+ setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((InvalidInputException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return isSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + case O4: + return getO4(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_partition_column_statistics_result) + return this.equals((update_partition_column_statistics_result)that); + return false; + } + + public boolean equals(update_partition_column_statistics_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if 
(this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + + return list.hashCode(); + } + + @Override + public int compareTo(update_partition_column_statistics_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return 
lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_partition_column_statistics_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + 
sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_partition_column_statistics_resultStandardSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_resultStandardScheme getScheme() { + return new update_partition_column_statistics_resultStandardScheme(); + } + } + + private static class update_partition_column_statistics_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { 
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class update_partition_column_statistics_resultTupleSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_resultTupleScheme getScheme() { + return new update_partition_column_statistics_resultTupleScheme(); + } + } + + private static class update_partition_column_statistics_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = 
(TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(5); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_req_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new 
org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_table_column_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_table_column_statistics_req_argsTupleSchemeFactory()); + } + + private SetPartitionsStatsRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_req_args.class, metaDataMap); + } + + public update_table_column_statistics_req_args() { + } + + public update_table_column_statistics_req_args( + SetPartitionsStatsRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. 
+ */ + public update_table_column_statistics_req_args(update_table_column_statistics_req_args other) { + if (other.isSetReq()) { + this.req = new SetPartitionsStatsRequest(other.req); + } + } + + public update_table_column_statistics_req_args deepCopy() { + return new update_table_column_statistics_req_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public SetPartitionsStatsRequest getReq() { + return this.req; + } + + public void setReq(SetPartitionsStatsRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((SetPartitionsStatsRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_table_column_statistics_req_args) + return this.equals((update_table_column_statistics_req_args)that); + return false; + } + + public boolean equals(update_table_column_statistics_req_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) 
{ + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(update_table_column_statistics_req_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_table_column_statistics_req_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_table_column_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public update_table_column_statistics_req_argsStandardScheme getScheme() { + return new update_table_column_statistics_req_argsStandardScheme(); + } + } + + private static class update_table_column_statistics_req_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new SetPartitionsStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + 
oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class update_table_column_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public update_table_column_statistics_req_argsTupleScheme getScheme() { + return new update_table_column_statistics_req_argsTupleScheme(); + } + } + + private static class update_table_column_statistics_req_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new SetPartitionsStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_req_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, 
(short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_table_column_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_table_column_statistics_req_resultTupleSchemeFactory()); + } + + private SetPartitionsStatsResponse success; // required + private NoSuchObjectException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + private InvalidInputException o4; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_req_result.class, metaDataMap); + } + + public update_table_column_statistics_req_result() { + } + + public update_table_column_statistics_req_result( + SetPartitionsStatsResponse success, + NoSuchObjectException o1, + InvalidObjectException o2, + MetaException o3, + InvalidInputException o4) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + this.o4 = o4; + } + + /** + * Performs a deep copy on other. 
+ */ + public update_table_column_statistics_req_result(update_table_column_statistics_req_result other) { + if (other.isSetSuccess()) { + this.success = new SetPartitionsStatsResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new InvalidInputException(other.o4); + } + } + + public update_table_column_statistics_req_result deepCopy() { + return new update_table_column_statistics_req_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + this.o3 = null; + this.o4 = null; + } + + public SetPartitionsStatsResponse getSuccess() { + return this.success; + } + + public void setSuccess(SetPartitionsStatsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean 
isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public InvalidInputException getO4() { + return this.o4; + } + + public void setO4(InvalidInputException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((SetPartitionsStatsResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((InvalidInputException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + case O4: + return getO4(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false 
otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_table_column_statistics_req_result) + return this.equals((update_table_column_statistics_req_result)that); + return false; + } + + public boolean equals(update_table_column_statistics_req_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && 
that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + boolean present_o4 = true && (isSetO4()); + list.add(present_o4); + if (present_o4) + list.add(o4); + + return list.hashCode(); + } + + @Override + public int compareTo(update_table_column_statistics_req_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + 
return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(other.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, other.o4); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_table_column_statistics_req_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct 
validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class update_table_column_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public update_table_column_statistics_req_resultStandardScheme getScheme() { + return new update_table_column_statistics_req_resultStandardScheme(); + } + } + + private static class update_table_column_statistics_req_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new SetPartitionsStatsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private 
static class update_table_column_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public update_table_column_statistics_req_resultTupleScheme getScheme() { + return new update_table_column_statistics_req_resultTupleScheme(); + } + } + + private static class update_table_column_statistics_req_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(5); + if (incoming.get(0)) { + struct.success = new SetPartitionsStatsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new 
InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_req_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new update_partition_column_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_column_statistics_req_argsTupleSchemeFactory()); + } + + private SetPartitionsStatsRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_req_args.class, metaDataMap); + } + + public update_partition_column_statistics_req_args() { + } + + public update_partition_column_statistics_req_args( + SetPartitionsStatsRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. 
+ */ + public update_partition_column_statistics_req_args(update_partition_column_statistics_req_args other) { + if (other.isSetReq()) { + this.req = new SetPartitionsStatsRequest(other.req); + } + } + + public update_partition_column_statistics_req_args deepCopy() { + return new update_partition_column_statistics_req_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public SetPartitionsStatsRequest getReq() { + return this.req; + } + + public void setReq(SetPartitionsStatsRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((SetPartitionsStatsRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof update_partition_column_statistics_req_args) + return this.equals((update_partition_column_statistics_req_args)that); + return false; + } + + public boolean equals(update_partition_column_statistics_req_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if 
(this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(update_partition_column_statistics_req_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("update_partition_column_statistics_req_args("); boolean first = true; - sb.append("stats_obj:"); - if (this.stats_obj == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.stats_obj); + sb.append(this.req); } first = false; sb.append(")"); @@ -145227,8 +149192,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (stats_obj != null) { - stats_obj.validate(); + if (req != null) { + 
req.validate(); } } @@ -145248,15 +149213,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_table_column_statistics_argsStandardSchemeFactory implements SchemeFactory { - public update_table_column_statistics_argsStandardScheme getScheme() { - return new update_table_column_statistics_argsStandardScheme(); + private static class update_partition_column_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_req_argsStandardScheme getScheme() { + return new update_partition_column_statistics_req_argsStandardScheme(); } } - private static class update_table_column_statistics_argsStandardScheme extends StandardScheme { + private static class update_partition_column_statistics_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -145266,11 +149231,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column break; } switch (schemeField.id) { - case 1: // STATS_OBJ + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.stats_obj = new ColumnStatistics(); - struct.stats_obj.read(iprot); - struct.setStats_objIsSet(true); + struct.req = new SetPartitionsStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -145284,13 +149249,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, 
update_table_column_statistics_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.stats_obj != null) { - oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); - struct.stats_obj.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -145299,45 +149264,45 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_colum } - private static class update_table_column_statistics_argsTupleSchemeFactory implements SchemeFactory { - public update_table_column_statistics_argsTupleScheme getScheme() { - return new update_table_column_statistics_argsTupleScheme(); + private static class update_partition_column_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_req_argsTupleScheme getScheme() { + return new update_partition_column_statistics_req_argsTupleScheme(); } } - private static class update_table_column_statistics_argsTupleScheme extends TupleScheme { + private static class update_partition_column_statistics_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetStats_obj()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetStats_obj()) { - struct.stats_obj.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.stats_obj = new ColumnStatistics(); - struct.stats_obj.read(iprot); - struct.setStats_objIsSet(true); + struct.req = new SetPartitionsStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_req_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static 
final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); @@ -145345,11 +149310,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_table_column_statistics_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_table_column_statistics_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_partition_column_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_column_statistics_req_resultTupleSchemeFactory()); } - private boolean success; // required + private SetPartitionsStatsResponse success; // required private NoSuchObjectException o1; // required private InvalidObjectException o2; // required private MetaException o3; // required @@ -145426,13 +149391,11 @@ public String getFieldName() { } // isset id assignments - private static final int __SUCCESS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsResponse.class))); tmpMap.put(_Fields.O1, new 
org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -145442,14 +149405,14 @@ public String getFieldName() { tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_req_result.class, metaDataMap); } - public update_table_column_statistics_result() { + public update_partition_column_statistics_req_result() { } - public update_table_column_statistics_result( - boolean success, + public update_partition_column_statistics_req_result( + SetPartitionsStatsResponse success, NoSuchObjectException o1, InvalidObjectException o2, MetaException o3, @@ -145457,7 +149420,6 @@ public update_table_column_statistics_result( { this(); this.success = success; - setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; this.o3 = o3; @@ -145467,9 +149429,10 @@ public update_table_column_statistics_result( /** * Performs a deep copy on other. 
*/ - public update_table_column_statistics_result(update_table_column_statistics_result other) { - __isset_bitfield = other.__isset_bitfield; - this.success = other.success; + public update_partition_column_statistics_req_result(update_partition_column_statistics_req_result other) { + if (other.isSetSuccess()) { + this.success = new SetPartitionsStatsResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } @@ -145484,40 +149447,40 @@ public update_table_column_statistics_result(update_table_column_statistics_resu } } - public update_table_column_statistics_result deepCopy() { - return new update_table_column_statistics_result(this); + public update_partition_column_statistics_req_result deepCopy() { + return new update_partition_column_statistics_req_result(this); } @Override public void clear() { - setSuccessIsSet(false); - this.success = false; + this.success = null; this.o1 = null; this.o2 = null; this.o3 = null; this.o4 = null; } - public boolean isSuccess() { + public SetPartitionsStatsResponse getSuccess() { return this.success; } - public void setSuccess(boolean success) { + public void setSuccess(SetPartitionsStatsResponse success) { this.success = success; - setSuccessIsSet(true); } public void unsetSuccess() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + this.success = null; } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + return this.success != null; } public void setSuccessIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + if (!value) { + this.success = null; + } } public NoSuchObjectException getO1() { @@ -145618,7 +149581,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Boolean)value); + 
setSuccess((SetPartitionsStatsResponse)value); } break; @@ -145660,7 +149623,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return isSuccess(); + return getSuccess(); case O1: return getO1(); @@ -145703,21 +149666,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_table_column_statistics_result) - return this.equals((update_table_column_statistics_result)that); + if (that instanceof update_partition_column_statistics_req_result) + return this.equals((update_partition_column_statistics_req_result)that); return false; } - public boolean equals(update_table_column_statistics_result that) { + public boolean equals(update_partition_column_statistics_req_result that) { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -145764,7 +149727,7 @@ public boolean equals(update_table_column_statistics_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true; + boolean present_success = true && (isSetSuccess()); list.add(present_success); if (present_success) list.add(success); @@ -145793,7 +149756,7 @@ public int hashCode() { } @Override - public int compareTo(update_table_column_statistics_result other) { + public int compareTo(update_partition_column_statistics_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -145867,11 +149830,15 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("update_table_column_statistics_result("); + StringBuilder sb = new StringBuilder("update_partition_column_statistics_req_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -145912,6 +149879,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -145924,23 +149894,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class update_table_column_statistics_resultStandardSchemeFactory implements SchemeFactory { - public update_table_column_statistics_resultStandardScheme getScheme() { - return new update_table_column_statistics_resultStandardScheme(); + private static class update_partition_column_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_req_resultStandardScheme getScheme() { + return new update_partition_column_statistics_req_resultStandardScheme(); } } - private static class update_table_column_statistics_resultStandardScheme extends StandardScheme { + private static class update_partition_column_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -145951,8 +149919,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new SetPartitionsStatsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -146003,13 +149972,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column struct.validate(); } - 
public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -146038,16 +150007,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_colum } - private static class update_table_column_statistics_resultTupleSchemeFactory implements SchemeFactory { - public update_table_column_statistics_resultTupleScheme getScheme() { - return new update_table_column_statistics_resultTupleScheme(); + private static class update_partition_column_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public update_partition_column_statistics_req_resultTupleScheme getScheme() { + return new update_partition_column_statistics_req_resultTupleScheme(); } } - private static class update_table_column_statistics_resultTupleScheme extends TupleScheme { + private static class update_partition_column_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -146067,7 +150036,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column } oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { - 
oprot.writeBool(struct.success); + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -146084,11 +150053,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = iprot.readBool(); + struct.success = new SetPartitionsStatsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -146116,22 +150086,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_basic_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_basic_statistics_req_args"); - private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("stats_obj", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final 
org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_partition_column_statistics_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_partition_column_statistics_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_table_basic_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_table_basic_statistics_req_argsTupleSchemeFactory()); } - private ColumnStatistics stats_obj; // required + private SetBasicStatsRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATS_OBJ((short)1, "stats_obj"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -146146,8 +150116,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // STATS_OBJ - return STATS_OBJ; + case 1: // REQ + return REQ; default: return null; } @@ -146191,70 +150161,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("stats_obj", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + 
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetBasicStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_basic_statistics_req_args.class, metaDataMap); } - public update_partition_column_statistics_args() { + public update_table_basic_statistics_req_args() { } - public update_partition_column_statistics_args( - ColumnStatistics stats_obj) + public update_table_basic_statistics_req_args( + SetBasicStatsRequest req) { this(); - this.stats_obj = stats_obj; + this.req = req; } /** * Performs a deep copy on other. */ - public update_partition_column_statistics_args(update_partition_column_statistics_args other) { - if (other.isSetStats_obj()) { - this.stats_obj = new ColumnStatistics(other.stats_obj); + public update_table_basic_statistics_req_args(update_table_basic_statistics_req_args other) { + if (other.isSetReq()) { + this.req = new SetBasicStatsRequest(other.req); } } - public update_partition_column_statistics_args deepCopy() { - return new update_partition_column_statistics_args(this); + public update_table_basic_statistics_req_args deepCopy() { + return new update_table_basic_statistics_req_args(this); } @Override public void clear() { - this.stats_obj = null; + this.req = null; } - public ColumnStatistics getStats_obj() { - return this.stats_obj; + public SetBasicStatsRequest getReq() { + return this.req; } - public void setStats_obj(ColumnStatistics stats_obj) { - this.stats_obj = stats_obj; + public void setReq(SetBasicStatsRequest req) { + this.req = req; } - public void unsetStats_obj() { - this.stats_obj = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field stats_obj is set (has been assigned a value) and false otherwise */ - public boolean isSetStats_obj() { - 
return this.stats_obj != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setStats_objIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.stats_obj = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case STATS_OBJ: + case REQ: if (value == null) { - unsetStats_obj(); + unsetReq(); } else { - setStats_obj((ColumnStatistics)value); + setReq((SetBasicStatsRequest)value); } break; @@ -146263,8 +150233,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case STATS_OBJ: - return getStats_obj(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -146277,8 +150247,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case STATS_OBJ: - return isSetStats_obj(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -146287,21 +150257,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_partition_column_statistics_args) - return this.equals((update_partition_column_statistics_args)that); + if (that instanceof update_table_basic_statistics_req_args) + return this.equals((update_table_basic_statistics_req_args)that); return false; } - public boolean equals(update_partition_column_statistics_args that) { + public boolean equals(update_table_basic_statistics_req_args that) { if (that == null) return false; - boolean this_present_stats_obj = true && this.isSetStats_obj(); - boolean that_present_stats_obj = true && that.isSetStats_obj(); - if (this_present_stats_obj || that_present_stats_obj) { - if (!(this_present_stats_obj && that_present_stats_obj)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if 
(this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.stats_obj.equals(that.stats_obj)) + if (!this.req.equals(that.req)) return false; } @@ -146312,28 +150282,28 @@ public boolean equals(update_partition_column_statistics_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_stats_obj = true && (isSetStats_obj()); - list.add(present_stats_obj); - if (present_stats_obj) - list.add(stats_obj); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(update_partition_column_statistics_args other) { + public int compareTo(update_table_basic_statistics_req_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetStats_obj()).compareTo(other.isSetStats_obj()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetStats_obj()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stats_obj, other.stats_obj); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -146355,14 +150325,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("update_partition_column_statistics_args("); + StringBuilder sb = new StringBuilder("update_table_basic_statistics_req_args("); boolean first = true; - sb.append("stats_obj:"); - if (this.stats_obj == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.stats_obj); + sb.append(this.req); } first = false; sb.append(")"); @@ -146372,8 +150342,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (stats_obj != null) { - stats_obj.validate(); + if (req != null) { + req.validate(); } } @@ -146393,15 +150363,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_partition_column_statistics_argsStandardSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_argsStandardScheme getScheme() { - return new update_partition_column_statistics_argsStandardScheme(); + private static class update_table_basic_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public update_table_basic_statistics_req_argsStandardScheme getScheme() { + return new update_table_basic_statistics_req_argsStandardScheme(); } } - private static class update_partition_column_statistics_argsStandardScheme extends StandardScheme { + private static class update_table_basic_statistics_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_basic_statistics_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -146411,11 +150381,11 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co break; } switch (schemeField.id) { - case 1: // STATS_OBJ + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.stats_obj = new ColumnStatistics(); - struct.stats_obj.read(iprot); - struct.setStats_objIsSet(true); + struct.req = new SetBasicStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -146429,13 +150399,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_basic_statistics_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.stats_obj != null) { - oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); - struct.stats_obj.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -146444,45 +150414,45 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_c } - private static class update_partition_column_statistics_argsTupleSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_argsTupleScheme getScheme() { - return new update_partition_column_statistics_argsTupleScheme(); + private static class update_table_basic_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public update_table_basic_statistics_req_argsTupleScheme getScheme() { + return new update_table_basic_statistics_req_argsTupleScheme(); } } - private static class update_partition_column_statistics_argsTupleScheme extends TupleScheme { + private static class 
update_table_basic_statistics_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_basic_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetStats_obj()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetStats_obj()) { - struct.stats_obj.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_basic_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.stats_obj = new ColumnStatistics(); - struct.stats_obj.read(iprot); - struct.setStats_objIsSet(true); + struct.req = new SetBasicStatsRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_basic_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_basic_statistics_req_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); @@ -146490,11 +150460,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_partition_column_statistics_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_partition_column_statistics_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_table_basic_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_table_basic_statistics_req_resultTupleSchemeFactory()); } - private boolean success; // required + private SetBasicStatsResponse success; // required private NoSuchObjectException o1; // required private InvalidObjectException o2; // required private MetaException o3; // required @@ -146571,13 +150541,11 @@ public String getFieldName() { } // isset id assignments - private static final int __SUCCESS_ISSET_ID = 
0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetBasicStatsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -146587,14 +150555,14 @@ public String getFieldName() { tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_basic_statistics_req_result.class, metaDataMap); } - public update_partition_column_statistics_result() { + public update_table_basic_statistics_req_result() { } - public update_partition_column_statistics_result( - boolean success, + public update_table_basic_statistics_req_result( + SetBasicStatsResponse success, NoSuchObjectException o1, InvalidObjectException o2, MetaException o3, @@ -146602,7 +150570,6 @@ public update_partition_column_statistics_result( { this(); this.success 
= success; - setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; this.o3 = o3; @@ -146612,9 +150579,10 @@ public update_partition_column_statistics_result( /** * Performs a deep copy on other. */ - public update_partition_column_statistics_result(update_partition_column_statistics_result other) { - __isset_bitfield = other.__isset_bitfield; - this.success = other.success; + public update_table_basic_statistics_req_result(update_table_basic_statistics_req_result other) { + if (other.isSetSuccess()) { + this.success = new SetBasicStatsResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } @@ -146629,40 +150597,40 @@ public update_partition_column_statistics_result(update_partition_column_statist } } - public update_partition_column_statistics_result deepCopy() { - return new update_partition_column_statistics_result(this); + public update_table_basic_statistics_req_result deepCopy() { + return new update_table_basic_statistics_req_result(this); } @Override public void clear() { - setSuccessIsSet(false); - this.success = false; + this.success = null; this.o1 = null; this.o2 = null; this.o3 = null; this.o4 = null; } - public boolean isSuccess() { + public SetBasicStatsResponse getSuccess() { return this.success; } - public void setSuccess(boolean success) { + public void setSuccess(SetBasicStatsResponse success) { this.success = success; - setSuccessIsSet(true); } public void unsetSuccess() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + this.success = null; } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + return this.success != null; } public void setSuccessIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + if (!value) { + this.success = null; + } } public NoSuchObjectException 
getO1() { @@ -146763,7 +150731,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Boolean)value); + setSuccess((SetBasicStatsResponse)value); } break; @@ -146805,7 +150773,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return isSuccess(); + return getSuccess(); case O1: return getO1(); @@ -146848,21 +150816,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_partition_column_statistics_result) - return this.equals((update_partition_column_statistics_result)that); + if (that instanceof update_table_basic_statistics_req_result) + return this.equals((update_table_basic_statistics_req_result)that); return false; } - public boolean equals(update_partition_column_statistics_result that) { + public boolean equals(update_table_basic_statistics_req_result that) { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -146909,7 +150877,7 @@ public boolean equals(update_partition_column_statistics_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true; + boolean present_success = true && (isSetSuccess()); list.add(present_success); if (present_success) list.add(success); @@ -146938,7 +150906,7 @@ public int hashCode() { } @Override - public int compareTo(update_partition_column_statistics_result other) { + public int compareTo(update_table_basic_statistics_req_result other) { if 
(!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -147012,11 +150980,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("update_partition_column_statistics_result("); + StringBuilder sb = new StringBuilder("update_table_basic_statistics_req_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -147057,6 +151029,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -147069,23 +151044,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class update_partition_column_statistics_resultStandardSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_resultStandardScheme getScheme() { - return new update_partition_column_statistics_resultStandardScheme(); + private static class update_table_basic_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public update_table_basic_statistics_req_resultStandardScheme getScheme() { + return new update_table_basic_statistics_req_resultStandardScheme(); } } - private static class update_partition_column_statistics_resultStandardScheme extends StandardScheme { + private static class update_table_basic_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_basic_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -147096,8 +151069,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new SetBasicStatsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -147148,13 +151122,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_basic_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -147183,16 +151157,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_c } - private static class update_partition_column_statistics_resultTupleSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_resultTupleScheme getScheme() { - return new update_partition_column_statistics_resultTupleScheme(); + private static class update_table_basic_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public update_table_basic_statistics_req_resultTupleScheme getScheme() { + return new update_table_basic_statistics_req_resultTupleScheme(); } } - private static class update_partition_column_statistics_resultTupleScheme extends TupleScheme { + private static class update_table_basic_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_table_basic_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -147212,7 +151186,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_co } oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { - 
oprot.writeBool(struct.success); + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -147229,11 +151203,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_co } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_table_basic_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = iprot.readBool(); + struct.success = new SetBasicStatsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -147261,18 +151236,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_req_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_basic_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_basic_statistics_req_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, 
SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_table_column_statistics_req_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_table_column_statistics_req_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_partition_basic_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_basic_statistics_req_argsTupleSchemeFactory()); } - private SetPartitionsStatsRequest req; // required + private SetBasicStatsRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -147337,16 +151312,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetBasicStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_req_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_basic_statistics_req_args.class, metaDataMap); } - public update_table_column_statistics_req_args() { + public update_partition_basic_statistics_req_args() { } - public update_table_column_statistics_req_args( - SetPartitionsStatsRequest req) + public update_partition_basic_statistics_req_args( + SetBasicStatsRequest req) { this(); this.req = req; @@ -147355,14 +151330,14 @@ public update_table_column_statistics_req_args( /** * 
Performs a deep copy on other. */ - public update_table_column_statistics_req_args(update_table_column_statistics_req_args other) { + public update_partition_basic_statistics_req_args(update_partition_basic_statistics_req_args other) { if (other.isSetReq()) { - this.req = new SetPartitionsStatsRequest(other.req); + this.req = new SetBasicStatsRequest(other.req); } } - public update_table_column_statistics_req_args deepCopy() { - return new update_table_column_statistics_req_args(this); + public update_partition_basic_statistics_req_args deepCopy() { + return new update_partition_basic_statistics_req_args(this); } @Override @@ -147370,11 +151345,11 @@ public void clear() { this.req = null; } - public SetPartitionsStatsRequest getReq() { + public SetBasicStatsRequest getReq() { return this.req; } - public void setReq(SetPartitionsStatsRequest req) { + public void setReq(SetBasicStatsRequest req) { this.req = req; } @@ -147399,7 +151374,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((SetPartitionsStatsRequest)value); + setReq((SetBasicStatsRequest)value); } break; @@ -147432,12 +151407,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_table_column_statistics_req_args) - return this.equals((update_table_column_statistics_req_args)that); + if (that instanceof update_partition_basic_statistics_req_args) + return this.equals((update_partition_basic_statistics_req_args)that); return false; } - public boolean equals(update_table_column_statistics_req_args that) { + public boolean equals(update_partition_basic_statistics_req_args that) { if (that == null) return false; @@ -147466,7 +151441,7 @@ public int hashCode() { } @Override - public int compareTo(update_table_column_statistics_req_args other) { + public int compareTo(update_partition_basic_statistics_req_args other) { if (!getClass().equals(other.getClass())) { 
return getClass().getName().compareTo(other.getClass().getName()); } @@ -147500,7 +151475,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("update_table_column_statistics_req_args("); + StringBuilder sb = new StringBuilder("update_partition_basic_statistics_req_args("); boolean first = true; sb.append("req:"); @@ -147538,15 +151513,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_table_column_statistics_req_argsStandardSchemeFactory implements SchemeFactory { - public update_table_column_statistics_req_argsStandardScheme getScheme() { - return new update_table_column_statistics_req_argsStandardScheme(); + private static class update_partition_basic_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public update_partition_basic_statistics_req_argsStandardScheme getScheme() { + return new update_partition_basic_statistics_req_argsStandardScheme(); } } - private static class update_table_column_statistics_req_argsStandardScheme extends StandardScheme { + private static class update_partition_basic_statistics_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_basic_statistics_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -147558,7 +151533,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new SetPartitionsStatsRequest(); + struct.req = new SetBasicStatsRequest(); struct.req.read(iprot); 
struct.setReqIsSet(true); } else { @@ -147574,7 +151549,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_basic_statistics_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -147589,16 +151564,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_colum } - private static class update_table_column_statistics_req_argsTupleSchemeFactory implements SchemeFactory { - public update_table_column_statistics_req_argsTupleScheme getScheme() { - return new update_table_column_statistics_req_argsTupleScheme(); + private static class update_partition_basic_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public update_partition_basic_statistics_req_argsTupleScheme getScheme() { + return new update_partition_basic_statistics_req_argsTupleScheme(); } } - private static class update_table_column_statistics_req_argsTupleScheme extends TupleScheme { + private static class update_partition_basic_statistics_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_basic_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -147611,11 +151586,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_args struct) throws 
org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_basic_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new SetPartitionsStatsRequest(); + struct.req = new SetBasicStatsRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -147624,8 +151599,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_req_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_basic_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_basic_statistics_req_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -147635,11 +151610,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new 
update_table_column_statistics_req_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_table_column_statistics_req_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_partition_basic_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_partition_basic_statistics_req_resultTupleSchemeFactory()); } - private SetPartitionsStatsResponse success; // required + private SetBasicStatsResponse success; // required private NoSuchObjectException o1; // required private InvalidObjectException o2; // required private MetaException o3; // required @@ -147720,7 +151695,7 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetBasicStatsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -147730,14 +151705,14 @@ public String getFieldName() { tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_basic_statistics_req_result.class, metaDataMap); } - public update_table_column_statistics_req_result() { + public update_partition_basic_statistics_req_result() { } - public update_table_column_statistics_req_result( - SetPartitionsStatsResponse success, + public update_partition_basic_statistics_req_result( + SetBasicStatsResponse success, NoSuchObjectException o1, InvalidObjectException o2, MetaException o3, @@ -147754,9 +151729,9 @@ public update_table_column_statistics_req_result( /** * Performs a deep copy on other. */ - public update_table_column_statistics_req_result(update_table_column_statistics_req_result other) { + public update_partition_basic_statistics_req_result(update_partition_basic_statistics_req_result other) { if (other.isSetSuccess()) { - this.success = new SetPartitionsStatsResponse(other.success); + this.success = new SetBasicStatsResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -147772,8 +151747,8 @@ public update_table_column_statistics_req_result(update_table_column_statistics_ } } - public update_table_column_statistics_req_result deepCopy() { - return new update_table_column_statistics_req_result(this); + public update_partition_basic_statistics_req_result deepCopy() { + return new update_partition_basic_statistics_req_result(this); } @Override @@ -147785,11 +151760,11 @@ public void clear() { this.o4 = null; } - public SetPartitionsStatsResponse getSuccess() { + public SetBasicStatsResponse getSuccess() { return this.success; } - public void setSuccess(SetPartitionsStatsResponse success) { + public void setSuccess(SetBasicStatsResponse success) { this.success = success; } @@ -147906,7 +151881,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { 
unsetSuccess(); } else { - setSuccess((SetPartitionsStatsResponse)value); + setSuccess((SetBasicStatsResponse)value); } break; @@ -147991,12 +151966,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_table_column_statistics_req_result) - return this.equals((update_table_column_statistics_req_result)that); + if (that instanceof update_partition_basic_statistics_req_result) + return this.equals((update_partition_basic_statistics_req_result)that); return false; } - public boolean equals(update_table_column_statistics_req_result that) { + public boolean equals(update_partition_basic_statistics_req_result that) { if (that == null) return false; @@ -148081,7 +152056,7 @@ public int hashCode() { } @Override - public int compareTo(update_table_column_statistics_req_result other) { + public int compareTo(update_partition_basic_statistics_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -148155,7 +152130,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("update_table_column_statistics_req_result("); + StringBuilder sb = new StringBuilder("update_partition_basic_statistics_req_result("); boolean first = true; sb.append("success:"); @@ -148225,15 +152200,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_table_column_statistics_req_resultStandardSchemeFactory implements SchemeFactory { - public update_table_column_statistics_req_resultStandardScheme getScheme() { - return new update_table_column_statistics_req_resultStandardScheme(); + private static class update_partition_basic_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public update_partition_basic_statistics_req_resultStandardScheme getScheme() { + return new update_partition_basic_statistics_req_resultStandardScheme(); } } - private static class update_table_column_statistics_req_resultStandardScheme extends StandardScheme { + private static class update_partition_basic_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_basic_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -148245,7 +152220,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new SetPartitionsStatsResponse(); + struct.success = new SetBasicStatsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -148297,7 +152272,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
update_table_column struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_basic_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -148332,16 +152307,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_colum } - private static class update_table_column_statistics_req_resultTupleSchemeFactory implements SchemeFactory { - public update_table_column_statistics_req_resultTupleScheme getScheme() { - return new update_table_column_statistics_req_resultTupleScheme(); + private static class update_partition_basic_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public update_partition_basic_statistics_req_resultTupleScheme getScheme() { + return new update_partition_basic_statistics_req_resultTupleScheme(); } } - private static class update_table_column_statistics_req_resultTupleScheme extends TupleScheme { + private static class update_partition_basic_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_basic_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -148378,11 +152353,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
update_partition_basic_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new SetPartitionsStatsResponse(); + struct.success = new SetBasicStatsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -148411,18 +152386,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_req_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class invalidate_all_column_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("invalidate_all_column_statistics_req_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_partition_column_statistics_req_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_partition_column_statistics_req_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new invalidate_all_column_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new invalidate_all_column_statistics_req_argsTupleSchemeFactory()); 
} - private SetPartitionsStatsRequest req; // required + private InvalidateColumnStatsRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -148487,16 +152462,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, InvalidateColumnStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_req_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(invalidate_all_column_statistics_req_args.class, metaDataMap); } - public update_partition_column_statistics_req_args() { + public invalidate_all_column_statistics_req_args() { } - public update_partition_column_statistics_req_args( - SetPartitionsStatsRequest req) + public invalidate_all_column_statistics_req_args( + InvalidateColumnStatsRequest req) { this(); this.req = req; @@ -148505,14 +152480,14 @@ public update_partition_column_statistics_req_args( /** * Performs a deep copy on other. 
*/ - public update_partition_column_statistics_req_args(update_partition_column_statistics_req_args other) { + public invalidate_all_column_statistics_req_args(invalidate_all_column_statistics_req_args other) { if (other.isSetReq()) { - this.req = new SetPartitionsStatsRequest(other.req); + this.req = new InvalidateColumnStatsRequest(other.req); } } - public update_partition_column_statistics_req_args deepCopy() { - return new update_partition_column_statistics_req_args(this); + public invalidate_all_column_statistics_req_args deepCopy() { + return new invalidate_all_column_statistics_req_args(this); } @Override @@ -148520,11 +152495,11 @@ public void clear() { this.req = null; } - public SetPartitionsStatsRequest getReq() { + public InvalidateColumnStatsRequest getReq() { return this.req; } - public void setReq(SetPartitionsStatsRequest req) { + public void setReq(InvalidateColumnStatsRequest req) { this.req = req; } @@ -148549,7 +152524,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((SetPartitionsStatsRequest)value); + setReq((InvalidateColumnStatsRequest)value); } break; @@ -148582,12 +152557,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_partition_column_statistics_req_args) - return this.equals((update_partition_column_statistics_req_args)that); + if (that instanceof invalidate_all_column_statistics_req_args) + return this.equals((invalidate_all_column_statistics_req_args)that); return false; } - public boolean equals(update_partition_column_statistics_req_args that) { + public boolean equals(invalidate_all_column_statistics_req_args that) { if (that == null) return false; @@ -148616,7 +152591,7 @@ public int hashCode() { } @Override - public int compareTo(update_partition_column_statistics_req_args other) { + public int compareTo(invalidate_all_column_statistics_req_args other) { if 
(!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -148650,7 +152625,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("update_partition_column_statistics_req_args("); + StringBuilder sb = new StringBuilder("invalidate_all_column_statistics_req_args("); boolean first = true; sb.append("req:"); @@ -148688,15 +152663,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_partition_column_statistics_req_argsStandardSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_req_argsStandardScheme getScheme() { - return new update_partition_column_statistics_req_argsStandardScheme(); + private static class invalidate_all_column_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public invalidate_all_column_statistics_req_argsStandardScheme getScheme() { + return new invalidate_all_column_statistics_req_argsStandardScheme(); } } - private static class update_partition_column_statistics_req_argsStandardScheme extends StandardScheme { + private static class invalidate_all_column_statistics_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, invalidate_all_column_statistics_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -148708,7 +152683,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new SetPartitionsStatsRequest(); + struct.req = 
new InvalidateColumnStatsRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -148724,7 +152699,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, invalidate_all_column_statistics_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -148739,16 +152714,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_c } - private static class update_partition_column_statistics_req_argsTupleSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_req_argsTupleScheme getScheme() { - return new update_partition_column_statistics_req_argsTupleScheme(); + private static class invalidate_all_column_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public invalidate_all_column_statistics_req_argsTupleScheme getScheme() { + return new invalidate_all_column_statistics_req_argsTupleScheme(); } } - private static class update_partition_column_statistics_req_argsTupleScheme extends TupleScheme { + private static class invalidate_all_column_statistics_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, invalidate_all_column_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -148761,11 +152736,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_co } @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, invalidate_all_column_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new SetPartitionsStatsRequest(); + struct.req = new InvalidateColumnStatsRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -148774,8 +152749,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_req_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class invalidate_all_column_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("invalidate_all_column_statistics_req_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -148785,11 +152760,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new update_partition_column_statistics_req_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new update_partition_column_statistics_req_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new invalidate_all_column_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new invalidate_all_column_statistics_req_resultTupleSchemeFactory()); } - private SetPartitionsStatsResponse success; // required + private InvalidateColumnStatsResponse success; // required private NoSuchObjectException o1; // required private InvalidObjectException o2; // required private MetaException o3; // required @@ -148870,7 +152845,7 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, InvalidateColumnStatsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -148880,14 +152855,14 @@ public String getFieldName() { tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = 
Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(invalidate_all_column_statistics_req_result.class, metaDataMap); } - public update_partition_column_statistics_req_result() { + public invalidate_all_column_statistics_req_result() { } - public update_partition_column_statistics_req_result( - SetPartitionsStatsResponse success, + public invalidate_all_column_statistics_req_result( + InvalidateColumnStatsResponse success, NoSuchObjectException o1, InvalidObjectException o2, MetaException o3, @@ -148904,9 +152879,9 @@ public update_partition_column_statistics_req_result( /** * Performs a deep copy on other. */ - public update_partition_column_statistics_req_result(update_partition_column_statistics_req_result other) { + public invalidate_all_column_statistics_req_result(invalidate_all_column_statistics_req_result other) { if (other.isSetSuccess()) { - this.success = new SetPartitionsStatsResponse(other.success); + this.success = new InvalidateColumnStatsResponse(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -148922,8 +152897,8 @@ public update_partition_column_statistics_req_result(update_partition_column_sta } } - public update_partition_column_statistics_req_result deepCopy() { - return new update_partition_column_statistics_req_result(this); + public invalidate_all_column_statistics_req_result deepCopy() { + return new invalidate_all_column_statistics_req_result(this); } @Override @@ -148935,11 +152910,11 @@ public void clear() { this.o4 = null; } - public SetPartitionsStatsResponse getSuccess() { + public InvalidateColumnStatsResponse getSuccess() { return this.success; } - public void setSuccess(SetPartitionsStatsResponse success) { + public void setSuccess(InvalidateColumnStatsResponse success) { this.success = success; } @@ -149056,7 
+153031,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((SetPartitionsStatsResponse)value); + setSuccess((InvalidateColumnStatsResponse)value); } break; @@ -149141,12 +153116,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof update_partition_column_statistics_req_result) - return this.equals((update_partition_column_statistics_req_result)that); + if (that instanceof invalidate_all_column_statistics_req_result) + return this.equals((invalidate_all_column_statistics_req_result)that); return false; } - public boolean equals(update_partition_column_statistics_req_result that) { + public boolean equals(invalidate_all_column_statistics_req_result that) { if (that == null) return false; @@ -149231,7 +153206,7 @@ public int hashCode() { } @Override - public int compareTo(update_partition_column_statistics_req_result other) { + public int compareTo(invalidate_all_column_statistics_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -149305,7 +153280,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("update_partition_column_statistics_req_result("); + StringBuilder sb = new StringBuilder("invalidate_all_column_statistics_req_result("); boolean first = true; sb.append("success:"); @@ -149375,15 +153350,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class update_partition_column_statistics_req_resultStandardSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_req_resultStandardScheme getScheme() { - return new update_partition_column_statistics_req_resultStandardScheme(); + private static class invalidate_all_column_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public invalidate_all_column_statistics_req_resultStandardScheme getScheme() { + return new invalidate_all_column_statistics_req_resultStandardScheme(); } } - private static class update_partition_column_statistics_req_resultStandardScheme extends StandardScheme { + private static class invalidate_all_column_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, invalidate_all_column_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -149395,7 +153370,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new SetPartitionsStatsResponse(); + struct.success = new InvalidateColumnStatsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -149447,7 +153422,7 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, invalidate_all_column_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -149482,16 +153457,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_partition_c } - private static class update_partition_column_statistics_req_resultTupleSchemeFactory implements SchemeFactory { - public update_partition_column_statistics_req_resultTupleScheme getScheme() { - return new update_partition_column_statistics_req_resultTupleScheme(); + private static class invalidate_all_column_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public invalidate_all_column_statistics_req_resultTupleScheme getScheme() { + return new invalidate_all_column_statistics_req_resultTupleScheme(); } } - private static class update_partition_column_statistics_req_resultTupleScheme extends TupleScheme { + private static class invalidate_all_column_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, invalidate_all_column_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -149528,11 +153503,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_co } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_req_result struct) throws org.apache.thrift.TException 
{ + public void read(org.apache.thrift.protocol.TProtocol prot, invalidate_all_column_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new SetPartitionsStatsResponse(); + struct.success = new InvalidateColumnStatsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -162991,13 +166966,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1452 = iprot.readListBegin(); - struct.success = new ArrayList(_list1452.size); - String _elem1453; - for (int _i1454 = 0; _i1454 < _list1452.size; ++_i1454) + org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); + struct.success = new ArrayList(_list1462.size); + String _elem1463; + for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) { - _elem1453 = iprot.readString(); - struct.success.add(_elem1453); + _elem1463 = iprot.readString(); + struct.success.add(_elem1463); } iprot.readListEnd(); } @@ -163032,9 +167007,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1455 : struct.success) + for (String _iter1465 : struct.success) { - oprot.writeString(_iter1455); + oprot.writeString(_iter1465); } oprot.writeListEnd(); } @@ -163073,9 +167048,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1456 : struct.success) + for (String _iter1466 : struct.success) { - oprot.writeString(_iter1456); + oprot.writeString(_iter1466); } } } @@ -163090,13 +167065,13 @@ 
public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1457.size); - String _elem1458; - for (int _i1459 = 0; _i1459 < _list1457.size; ++_i1459) + org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1467.size); + String _elem1468; + for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) { - _elem1458 = iprot.readString(); - struct.success.add(_elem1458); + _elem1468 = iprot.readString(); + struct.success.add(_elem1468); } } struct.setSuccessIsSet(true); @@ -167151,13 +171126,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1460 = iprot.readListBegin(); - struct.success = new ArrayList(_list1460.size); - String _elem1461; - for (int _i1462 = 0; _i1462 < _list1460.size; ++_i1462) + org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); + struct.success = new ArrayList(_list1470.size); + String _elem1471; + for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) { - _elem1461 = iprot.readString(); - struct.success.add(_elem1461); + _elem1471 = iprot.readString(); + struct.success.add(_elem1471); } iprot.readListEnd(); } @@ -167192,9 +171167,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1463 : struct.success) + for (String _iter1473 : struct.success) { - 
oprot.writeString(_iter1463); + oprot.writeString(_iter1473); } oprot.writeListEnd(); } @@ -167233,9 +171208,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1464 : struct.success) + for (String _iter1474 : struct.success) { - oprot.writeString(_iter1464); + oprot.writeString(_iter1474); } } } @@ -167250,13 +171225,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1465.size); - String _elem1466; - for (int _i1467 = 0; _i1467 < _list1465.size; ++_i1467) + org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1475.size); + String _elem1476; + for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) { - _elem1466 = iprot.readString(); - struct.success.add(_elem1466); + _elem1476 = iprot.readString(); + struct.success.add(_elem1476); } } struct.setSuccessIsSet(true); @@ -170547,14 +174522,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1468 = iprot.readListBegin(); - struct.success = new ArrayList(_list1468.size); - Role _elem1469; - for (int _i1470 = 0; _i1470 < _list1468.size; ++_i1470) + org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); + struct.success = new ArrayList(_list1478.size); + Role _elem1479; + for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) { - _elem1469 = new Role(); - _elem1469.read(iprot); - struct.success.add(_elem1469); + _elem1479 = new 
Role(); + _elem1479.read(iprot); + struct.success.add(_elem1479); } iprot.readListEnd(); } @@ -170589,9 +174564,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1471 : struct.success) + for (Role _iter1481 : struct.success) { - _iter1471.write(oprot); + _iter1481.write(oprot); } oprot.writeListEnd(); } @@ -170630,9 +174605,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1472 : struct.success) + for (Role _iter1482 : struct.success) { - _iter1472.write(oprot); + _iter1482.write(oprot); } } } @@ -170647,14 +174622,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1473 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1473.size); - Role _elem1474; - for (int _i1475 = 0; _i1475 < _list1473.size; ++_i1475) + org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1483.size); + Role _elem1484; + for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) { - _elem1474 = new Role(); - _elem1474.read(iprot); - struct.success.add(_elem1474); + _elem1484 = new Role(); + _elem1484.read(iprot); + struct.success.add(_elem1484); } } struct.setSuccessIsSet(true); @@ -173659,13 +177634,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList 
_list1476 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1476.size); - String _elem1477; - for (int _i1478 = 0; _i1478 < _list1476.size; ++_i1478) + org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1486.size); + String _elem1487; + for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) { - _elem1477 = iprot.readString(); - struct.group_names.add(_elem1477); + _elem1487 = iprot.readString(); + struct.group_names.add(_elem1487); } iprot.readListEnd(); } @@ -173701,9 +177676,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1479 : struct.group_names) + for (String _iter1489 : struct.group_names) { - oprot.writeString(_iter1479); + oprot.writeString(_iter1489); } oprot.writeListEnd(); } @@ -173746,9 +177721,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1480 : struct.group_names) + for (String _iter1490 : struct.group_names) { - oprot.writeString(_iter1480); + oprot.writeString(_iter1490); } } } @@ -173769,13 +177744,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1481 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1481.size); - String _elem1482; - for (int _i1483 = 0; _i1483 < _list1481.size; ++_i1483) + org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1491.size); + String _elem1492; + for (int 
_i1493 = 0; _i1493 < _list1491.size; ++_i1493) { - _elem1482 = iprot.readString(); - struct.group_names.add(_elem1482); + _elem1492 = iprot.readString(); + struct.group_names.add(_elem1492); } } struct.setGroup_namesIsSet(true); @@ -175233,14 +179208,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1484 = iprot.readListBegin(); - struct.success = new ArrayList(_list1484.size); - HiveObjectPrivilege _elem1485; - for (int _i1486 = 0; _i1486 < _list1484.size; ++_i1486) + org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); + struct.success = new ArrayList(_list1494.size); + HiveObjectPrivilege _elem1495; + for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) { - _elem1485 = new HiveObjectPrivilege(); - _elem1485.read(iprot); - struct.success.add(_elem1485); + _elem1495 = new HiveObjectPrivilege(); + _elem1495.read(iprot); + struct.success.add(_elem1495); } iprot.readListEnd(); } @@ -175275,9 +179250,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1487 : struct.success) + for (HiveObjectPrivilege _iter1497 : struct.success) { - _iter1487.write(oprot); + _iter1497.write(oprot); } oprot.writeListEnd(); } @@ -175316,9 +179291,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1488 : struct.success) + for (HiveObjectPrivilege _iter1498 : struct.success) { - _iter1488.write(oprot); + _iter1498.write(oprot); } } } @@ -175333,14 +179308,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet 
incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1489.size); - HiveObjectPrivilege _elem1490; - for (int _i1491 = 0; _i1491 < _list1489.size; ++_i1491) + org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1499.size); + HiveObjectPrivilege _elem1500; + for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) { - _elem1490 = new HiveObjectPrivilege(); - _elem1490.read(iprot); - struct.success.add(_elem1490); + _elem1500 = new HiveObjectPrivilege(); + _elem1500.read(iprot); + struct.success.add(_elem1500); } } struct.setSuccessIsSet(true); @@ -179287,13 +183262,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1492 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1492.size); - String _elem1493; - for (int _i1494 = 0; _i1494 < _list1492.size; ++_i1494) + org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1502.size); + String _elem1503; + for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) { - _elem1493 = iprot.readString(); - struct.group_names.add(_elem1493); + _elem1503 = iprot.readString(); + struct.group_names.add(_elem1503); } iprot.readListEnd(); } @@ -179324,9 +183299,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1495 : struct.group_names) + for (String _iter1505 : 
struct.group_names) { - oprot.writeString(_iter1495); + oprot.writeString(_iter1505); } oprot.writeListEnd(); } @@ -179363,9 +183338,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1496 : struct.group_names) + for (String _iter1506 : struct.group_names) { - oprot.writeString(_iter1496); + oprot.writeString(_iter1506); } } } @@ -179381,13 +183356,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1497.size); - String _elem1498; - for (int _i1499 = 0; _i1499 < _list1497.size; ++_i1499) + org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1507.size); + String _elem1508; + for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) { - _elem1498 = iprot.readString(); - struct.group_names.add(_elem1498); + _elem1508 = iprot.readString(); + struct.group_names.add(_elem1508); } } struct.setGroup_namesIsSet(true); @@ -179790,13 +183765,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1500 = iprot.readListBegin(); - struct.success = new ArrayList(_list1500.size); - String _elem1501; - for (int _i1502 = 0; _i1502 < _list1500.size; ++_i1502) + org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); + struct.success = new ArrayList(_list1510.size); + String _elem1511; + for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) { - _elem1501 = iprot.readString(); - struct.success.add(_elem1501); + 
_elem1511 = iprot.readString(); + struct.success.add(_elem1511); } iprot.readListEnd(); } @@ -179831,9 +183806,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1503 : struct.success) + for (String _iter1513 : struct.success) { - oprot.writeString(_iter1503); + oprot.writeString(_iter1513); } oprot.writeListEnd(); } @@ -179872,9 +183847,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1504 : struct.success) + for (String _iter1514 : struct.success) { - oprot.writeString(_iter1504); + oprot.writeString(_iter1514); } } } @@ -179889,13 +183864,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1505.size); - String _elem1506; - for (int _i1507 = 0; _i1507 < _list1505.size; ++_i1507) + org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1515.size); + String _elem1516; + for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) { - _elem1506 = iprot.readString(); - struct.success.add(_elem1506); + _elem1516 = iprot.readString(); + struct.success.add(_elem1516); } } struct.setSuccessIsSet(true); @@ -185186,13 +189161,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1508 
= iprot.readListBegin(); - struct.success = new ArrayList(_list1508.size); - String _elem1509; - for (int _i1510 = 0; _i1510 < _list1508.size; ++_i1510) + org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); + struct.success = new ArrayList(_list1518.size); + String _elem1519; + for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) { - _elem1509 = iprot.readString(); - struct.success.add(_elem1509); + _elem1519 = iprot.readString(); + struct.success.add(_elem1519); } iprot.readListEnd(); } @@ -185218,9 +189193,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1511 : struct.success) + for (String _iter1521 : struct.success) { - oprot.writeString(_iter1511); + oprot.writeString(_iter1521); } oprot.writeListEnd(); } @@ -185251,9 +189226,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1512 : struct.success) + for (String _iter1522 : struct.success) { - oprot.writeString(_iter1512); + oprot.writeString(_iter1522); } } } @@ -185265,13 +189240,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1513.size); - String _elem1514; - for (int _i1515 = 0; _i1515 < _list1513.size; ++_i1515) + org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1523.size); + String _elem1524; + for (int _i1525 = 0; _i1525 < 
_list1523.size; ++_i1525) { - _elem1514 = iprot.readString(); - struct.success.add(_elem1514); + _elem1524 = iprot.readString(); + struct.success.add(_elem1524); } } struct.setSuccessIsSet(true); @@ -188301,13 +192276,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1516 = iprot.readListBegin(); - struct.success = new ArrayList(_list1516.size); - String _elem1517; - for (int _i1518 = 0; _i1518 < _list1516.size; ++_i1518) + org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); + struct.success = new ArrayList(_list1526.size); + String _elem1527; + for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) { - _elem1517 = iprot.readString(); - struct.success.add(_elem1517); + _elem1527 = iprot.readString(); + struct.success.add(_elem1527); } iprot.readListEnd(); } @@ -188333,9 +192308,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1519 : struct.success) + for (String _iter1529 : struct.success) { - oprot.writeString(_iter1519); + oprot.writeString(_iter1529); } oprot.writeListEnd(); } @@ -188366,9 +192341,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1520 : struct.success) + for (String _iter1530 : struct.success) { - oprot.writeString(_iter1520); + oprot.writeString(_iter1530); } } } @@ -188380,13 +192355,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1521 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1521.size); - String _elem1522; - for (int _i1523 = 0; _i1523 < _list1521.size; ++_i1523) + org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1531.size); + String _elem1532; + for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) { - _elem1522 = iprot.readString(); - struct.success.add(_elem1522); + _elem1532 = iprot.readString(); + struct.success.add(_elem1532); } } struct.setSuccessIsSet(true); @@ -237295,14 +241270,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1524 = iprot.readListBegin(); - struct.success = new ArrayList(_list1524.size); - SchemaVersion _elem1525; - for (int _i1526 = 0; _i1526 < _list1524.size; ++_i1526) + org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); + struct.success = new ArrayList(_list1534.size); + SchemaVersion _elem1535; + for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) { - _elem1525 = new SchemaVersion(); - _elem1525.read(iprot); - struct.success.add(_elem1525); + _elem1535 = new SchemaVersion(); + _elem1535.read(iprot); + struct.success.add(_elem1535); } iprot.readListEnd(); } @@ -237346,9 +241321,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1527 : struct.success) + for (SchemaVersion _iter1537 : struct.success) { - _iter1527.write(oprot); + _iter1537.write(oprot); } oprot.writeListEnd(); } @@ -237395,9 +241370,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1528 : struct.success) + for (SchemaVersion _iter1538 : struct.success) { - _iter1528.write(oprot); + _iter1538.write(oprot); } } } @@ -237415,14 +241390,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1529.size); - SchemaVersion _elem1530; - for (int _i1531 = 0; _i1531 < _list1529.size; ++_i1531) + org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1539.size); + SchemaVersion _elem1540; + for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) { - _elem1530 = new SchemaVersion(); - _elem1530.read(iprot); - struct.success.add(_elem1530); + _elem1540 = new SchemaVersion(); + _elem1540.read(iprot); + struct.success.add(_elem1540); } } struct.setSuccessIsSet(true); @@ -245965,14 +249940,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1532 = iprot.readListBegin(); - struct.success = new ArrayList(_list1532.size); - RuntimeStat _elem1533; - for (int _i1534 = 0; _i1534 < _list1532.size; ++_i1534) + org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); + struct.success = new ArrayList(_list1542.size); + RuntimeStat _elem1543; + for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) { - _elem1533 = new RuntimeStat(); - _elem1533.read(iprot); - struct.success.add(_elem1533); + _elem1543 = new RuntimeStat(); + _elem1543.read(iprot); 
+ struct.success.add(_elem1543); } iprot.readListEnd(); } @@ -246007,9 +249982,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1535 : struct.success) + for (RuntimeStat _iter1545 : struct.success) { - _iter1535.write(oprot); + _iter1545.write(oprot); } oprot.writeListEnd(); } @@ -246048,9 +250023,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1536 : struct.success) + for (RuntimeStat _iter1546 : struct.success) { - _iter1536.write(oprot); + _iter1546.write(oprot); } } } @@ -246065,14 +250040,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1537.size); - RuntimeStat _elem1538; - for (int _i1539 = 0; _i1539 < _list1537.size; ++_i1539) + org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1547.size); + RuntimeStat _elem1548; + for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) { - _elem1538 = new RuntimeStat(); - _elem1538.read(iprot); - struct.success.add(_elem1538); + _elem1548 = new RuntimeStat(); + _elem1548.read(iprot); + struct.success.add(_elem1548); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java index 1acf6f4b4e..bcaf97e959 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe case 1: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list344 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list344.size); - SQLUniqueConstraint _elem345; - for (int _i346 = 0; _i346 < _list344.size; ++_i346) + org.apache.thrift.protocol.TList _list354 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list354.size); + SQLUniqueConstraint _elem355; + for (int _i356 = 0; _i356 < _list354.size; ++_i356) { - _elem345 = new SQLUniqueConstraint(); - _elem345.read(iprot); - struct.uniqueConstraints.add(_elem345); + _elem355 = new SQLUniqueConstraint(); + _elem355.read(iprot); + struct.uniqueConstraints.add(_elem355); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, UniqueConstraintsR oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter347 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter357 : struct.uniqueConstraints) { - _iter347.write(oprot); + _iter357.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe TTupleProtocol oprot = (TTupleProtocol) prot; { 
oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter348 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter358 : struct.uniqueConstraints) { - _iter348.write(oprot); + _iter358.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe public void read(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list349.size); - SQLUniqueConstraint _elem350; - for (int _i351 = 0; _i351 < _list349.size; ++_i351) + org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list359.size); + SQLUniqueConstraint _elem360; + for (int _i361 = 0; _i361 < _list359.size; ++_i361) { - _elem350 = new SQLUniqueConstraint(); - _elem350.read(iprot); - struct.uniqueConstraints.add(_elem350); + _elem360 = new SQLUniqueConstraint(); + _elem360.read(iprot); + struct.uniqueConstraints.add(_elem360); } } struct.setUniqueConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 44674798f7..75c4f39359 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.pools = new ArrayList(_list872.size); - WMPool _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list882 = iprot.readListBegin(); + struct.pools = new ArrayList(_list882.size); + WMPool _elem883; + for (int _i884 = 0; _i884 < _list882.size; ++_i884) { - _elem873 = new WMPool(); - _elem873.read(iprot); - struct.pools.add(_elem873); + _elem883 = new WMPool(); + _elem883.read(iprot); + struct.pools.add(_elem883); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list875 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list875.size); - WMMapping _elem876; - for (int _i877 = 0; _i877 < _list875.size; ++_i877) + org.apache.thrift.protocol.TList _list885 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list885.size); + WMMapping _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem876 = new WMMapping(); - _elem876.read(iprot); - struct.mappings.add(_elem876); + _elem886 = new WMMapping(); + _elem886.read(iprot); + struct.mappings.add(_elem886); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list878 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list878.size); - WMTrigger _elem879; - for (int _i880 = 0; _i880 < _list878.size; ++_i880) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list888.size); + WMTrigger _elem889; + for (int _i890 = 0; 
_i890 < _list888.size; ++_i890) { - _elem879 = new WMTrigger(); - _elem879.read(iprot); - struct.triggers.add(_elem879); + _elem889 = new WMTrigger(); + _elem889.read(iprot); + struct.triggers.add(_elem889); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list881 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list881.size); - WMPoolTrigger _elem882; - for (int _i883 = 0; _i883 < _list881.size; ++_i883) + org.apache.thrift.protocol.TList _list891 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list891.size); + WMPoolTrigger _elem892; + for (int _i893 = 0; _i893 < _list891.size; ++_i893) { - _elem882 = new WMPoolTrigger(); - _elem882.read(iprot); - struct.poolTriggers.add(_elem882); + _elem892 = new WMPoolTrigger(); + _elem892.read(iprot); + struct.poolTriggers.add(_elem892); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter884 : struct.pools) + for (WMPool _iter894 : struct.pools) { - _iter884.write(oprot); + _iter894.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter885 : struct.mappings) + for (WMMapping _iter895 : struct.mappings) { - _iter885.write(oprot); + _iter895.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter886 : struct.triggers) + for (WMTrigger _iter896 : struct.triggers) { - _iter886.write(oprot); + _iter896.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter887 : struct.poolTriggers) + for (WMPoolTrigger _iter897 : struct.poolTriggers) { - _iter887.write(oprot); + _iter897.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter888 : struct.pools) + for (WMPool _iter898 : struct.pools) { - _iter888.write(oprot); + _iter898.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter889 : struct.mappings) + for (WMMapping _iter899 : struct.mappings) { - _iter889.write(oprot); + _iter899.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter890 : struct.triggers) + for (WMTrigger _iter900 : struct.triggers) { - _iter890.write(oprot); + _iter900.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter891 : struct.poolTriggers) + for (WMPoolTrigger _iter901 : struct.poolTriggers) { - _iter891.write(oprot); + _iter901.write(oprot); } } } @@ -972,56 +972,56 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list892 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list892.size); - WMPool _elem893; - for (int _i894 = 0; _i894 < _list892.size; ++_i894) + org.apache.thrift.protocol.TList _list902 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list902.size); + WMPool _elem903; + for (int _i904 = 0; _i904 < _list902.size; ++_i904) { - _elem893 = new WMPool(); - _elem893.read(iprot); - struct.pools.add(_elem893); + _elem903 = new WMPool(); + _elem903.read(iprot); + struct.pools.add(_elem903); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list895 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list895.size); - WMMapping _elem896; - for (int _i897 = 0; _i897 < _list895.size; ++_i897) + org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list905.size); + WMMapping _elem906; + for (int _i907 = 0; _i907 < _list905.size; ++_i907) { - _elem896 = new WMMapping(); - _elem896.read(iprot); - struct.mappings.add(_elem896); + _elem906 = new WMMapping(); + _elem906.read(iprot); + struct.mappings.add(_elem906); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list898 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list898.size); - WMTrigger _elem899; - for (int _i900 = 0; _i900 < _list898.size; ++_i900) + org.apache.thrift.protocol.TList _list908 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list908.size); + WMTrigger _elem909; + for (int _i910 = 0; _i910 < _list908.size; ++_i910) { - _elem899 = new WMTrigger(); - _elem899.read(iprot); - struct.triggers.add(_elem899); + _elem909 = new WMTrigger(); + _elem909.read(iprot); + struct.triggers.add(_elem909); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list901.size); - WMPoolTrigger _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list911 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list911.size); + WMPoolTrigger _elem912; + for (int _i913 = 0; _i913 < _list911.size; ++_i913) { - _elem902 = new WMPoolTrigger(); - _elem902.read(iprot); - struct.poolTriggers.add(_elem902); + _elem912 = new WMPoolTrigger(); + _elem912.read(iprot); + struct.poolTriggers.add(_elem912); } } struct.setPoolTriggersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index c6cb845585..8d0f0dfe90 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list904.size); - WMResourcePlan _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list914 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list914.size); + WMResourcePlan _elem915; + for (int _i916 = 0; _i916 < _list914.size; ++_i916) { - _elem905 = new WMResourcePlan(); - _elem905.read(iprot); - struct.resourcePlans.add(_elem905); + _elem915 = new WMResourcePlan(); + _elem915.read(iprot); + struct.resourcePlans.add(_elem915); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter907 : struct.resourcePlans) + for (WMResourcePlan _iter917 : struct.resourcePlans) { - _iter907.write(oprot); + _iter917.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter908 : struct.resourcePlans) + for (WMResourcePlan _iter918 : struct.resourcePlans) { - _iter908.write(oprot); + _iter918.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list909.size); - WMResourcePlan _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list919 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list919.size); + WMResourcePlan _elem920; + for (int _i921 = 0; _i921 < _list919.size; ++_i921) { - _elem910 = new WMResourcePlan(); - _elem910.read(iprot); - struct.resourcePlans.add(_elem910); + _elem920 = new WMResourcePlan(); + _elem920.read(iprot); + struct.resourcePlans.add(_elem920); } } struct.setResourcePlansIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index 9eed335cda..f4f87a4c7e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list928.size); - WMTrigger _elem929; - for (int _i930 = 0; _i930 < _list928.size; ++_i930) + org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list938.size); + WMTrigger _elem939; + for (int _i940 = 0; _i940 < _list938.size; ++_i940) { - _elem929 = new WMTrigger(); - _elem929.read(iprot); - struct.triggers.add(_elem929); + _elem939 = new WMTrigger(); + _elem939.read(iprot); + struct.triggers.add(_elem939); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe 
oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter931 : struct.triggers) + for (WMTrigger _iter941 : struct.triggers) { - _iter931.write(oprot); + _iter941.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter932 : struct.triggers) + for (WMTrigger _iter942 : struct.triggers) { - _iter932.write(oprot); + _iter942.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list933.size); - WMTrigger _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list943 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list943.size); + WMTrigger _elem944; + for (int _i945 = 0; _i945 < _list943.size; ++_i945) { - _elem934 = new WMTrigger(); - _elem934.read(iprot); - struct.triggers.add(_elem934); + _elem944 = new WMTrigger(); + _elem944.read(iprot); + struct.triggers.add(_elem944); } } struct.setTriggersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index ee9251c866..3130cc1741 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); - struct.errors = new ArrayList(_list912.size); - String _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); + struct.errors = new ArrayList(_list922.size); + String _elem923; + for (int _i924 = 0; _i924 < _list922.size; ++_i924) { - _elem913 = iprot.readString(); - struct.errors.add(_elem913); + _elem923 = iprot.readString(); + struct.errors.add(_elem923); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list915 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list915.size); - String _elem916; - for (int _i917 = 0; _i917 < _list915.size; ++_i917) + org.apache.thrift.protocol.TList _list925 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list925.size); + String _elem926; + for (int _i927 = 0; _i927 < _list925.size; ++_i927) { - _elem916 = iprot.readString(); - struct.warnings.add(_elem916); + _elem926 = iprot.readString(); + struct.warnings.add(_elem926); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.errors.size())); - for (String _iter918 : struct.errors) + for (String _iter928 : struct.errors) { - oprot.writeString(_iter918); + oprot.writeString(_iter928); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter919 : struct.warnings) + for (String _iter929 : struct.warnings) { - oprot.writeString(_iter919); + oprot.writeString(_iter929); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter920 : struct.errors) + for (String _iter930 : struct.errors) { - oprot.writeString(_iter920); + oprot.writeString(_iter930); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter921 : struct.warnings) + for (String _iter931 : struct.warnings) { - oprot.writeString(_iter921); + oprot.writeString(_iter931); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list922 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list922.size); - String _elem923; - for (int _i924 = 0; _i924 < _list922.size; ++_i924) + org.apache.thrift.protocol.TList _list932 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list932.size); + String _elem933; + for (int _i934 = 0; _i934 < _list932.size; ++_i934) { - _elem923 = iprot.readString(); - struct.errors.add(_elem923); + _elem933 = iprot.readString(); + 
struct.errors.add(_elem933); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list925.size); - String _elem926; - for (int _i927 = 0; _i927 < _list925.size; ++_i927) + org.apache.thrift.protocol.TList _list935 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list935.size); + String _elem936; + for (int _i937 = 0; _i937 < _list935.size; ++_i937) { - _elem926 = iprot.readString(); - struct.warnings.add(_elem926); + _elem936 = iprot.readString(); + struct.warnings.add(_elem936); } } struct.setWarningsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java index c7ef726f54..56b025939a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteNotificationLo case 6: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list772.size); - String _elem773; - for (int _i774 = 0; _i774 < _list772.size; ++_i774) + org.apache.thrift.protocol.TList _list782 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list782.size); + String _elem783; + for (int _i784 = 0; _i784 < _list782.size; ++_i784) { - _elem773 = 
iprot.readString(); - struct.partitionVals.add(_elem773); + _elem783 = iprot.readString(); + struct.partitionVals.add(_elem783); } iprot.readListEnd(); } @@ -867,9 +867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteNotificationL oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter775 : struct.partitionVals) + for (String _iter785 : struct.partitionVals) { - oprot.writeString(_iter775); + oprot.writeString(_iter785); } oprot.writeListEnd(); } @@ -906,9 +906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter776 : struct.partitionVals) + for (String _iter786 : struct.partitionVals) { - oprot.writeString(_iter776); + oprot.writeString(_iter786); } } } @@ -931,13 +931,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list777.size); - String _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list787 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list787.size); + String _elem788; + for (int _i789 = 0; _i789 < _list787.size; ++_i789) { - _elem778 = iprot.readString(); - struct.partitionVals.add(_elem778); + _elem788 = iprot.readString(); + struct.partitionVals.add(_elem788); } } struct.setPartitionValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index cc19f2389e..164c402378 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -899,6 +899,33 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function update_partition_column_statistics_req(\metastore\SetPartitionsStatsRequest $req); /** + * @param \metastore\SetBasicStatsRequest $req + * @return \metastore\SetBasicStatsResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + * @throws \metastore\InvalidInputException + */ + public function update_table_basic_statistics_req(\metastore\SetBasicStatsRequest $req); + /** + * @param \metastore\SetBasicStatsRequest $req + * @return \metastore\SetBasicStatsResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + * @throws \metastore\InvalidInputException + */ + public function update_partition_basic_statistics_req(\metastore\SetBasicStatsRequest $req); + /** + * @param \metastore\InvalidateColumnStatsRequest $req + * @return \metastore\InvalidateColumnStatsResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + * @throws \metastore\InvalidInputException + */ + public function invalidate_all_column_statistics_req(\metastore\InvalidateColumnStatsRequest $req); + /** * @param string $db_name * @param string $tbl_name * @param string $col_name @@ -7793,6 +7820,195 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("update_partition_column_statistics_req failed: unknown result"); } + public function 
update_table_basic_statistics_req(\metastore\SetBasicStatsRequest $req) + { + $this->send_update_table_basic_statistics_req($req); + return $this->recv_update_table_basic_statistics_req(); + } + + public function send_update_table_basic_statistics_req(\metastore\SetBasicStatsRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_update_table_basic_statistics_req_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'update_table_basic_statistics_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('update_table_basic_statistics_req', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_update_table_basic_statistics_req() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_table_basic_statistics_req_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_update_table_basic_statistics_req_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 
!== null) { + throw $result->o4; + } + throw new \Exception("update_table_basic_statistics_req failed: unknown result"); + } + + public function update_partition_basic_statistics_req(\metastore\SetBasicStatsRequest $req) + { + $this->send_update_partition_basic_statistics_req($req); + return $this->recv_update_partition_basic_statistics_req(); + } + + public function send_update_partition_basic_statistics_req(\metastore\SetBasicStatsRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_update_partition_basic_statistics_req_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'update_partition_basic_statistics_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('update_partition_basic_statistics_req', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_update_partition_basic_statistics_req() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_partition_basic_statistics_req_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_update_partition_basic_statistics_req_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + 
} + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("update_partition_basic_statistics_req failed: unknown result"); + } + + public function invalidate_all_column_statistics_req(\metastore\InvalidateColumnStatsRequest $req) + { + $this->send_invalidate_all_column_statistics_req($req); + return $this->recv_invalidate_all_column_statistics_req(); + } + + public function send_invalidate_all_column_statistics_req(\metastore\InvalidateColumnStatsRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_invalidate_all_column_statistics_req_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'invalidate_all_column_statistics_req', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('invalidate_all_column_statistics_req', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_invalidate_all_column_statistics_req() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_invalidate_all_column_statistics_req_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new 
\metastore\ThriftHiveMetastore_invalidate_all_column_statistics_req_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("invalidate_all_column_statistics_req failed: unknown result"); + } + public function get_table_column_statistics($db_name, $tbl_name, $col_name) { $this->send_get_table_column_statistics($db_name, $tbl_name, $col_name); @@ -15892,14 +16108,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size854 = 0; - $_etype857 = 0; - $xfer += $input->readListBegin($_etype857, $_size854); - for ($_i858 = 0; $_i858 < $_size854; ++$_i858) + $_size863 = 0; + $_etype866 = 0; + $xfer += $input->readListBegin($_etype866, $_size863); + for ($_i867 = 0; $_i867 < $_size863; ++$_i867) { - $elem859 = null; - $xfer += $input->readString($elem859); - $this->success []= $elem859; + $elem868 = null; + $xfer += $input->readString($elem868); + $this->success []= $elem868; } $xfer += $input->readListEnd(); } else { @@ -15935,9 +16151,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter860) + foreach ($this->success as $iter869) { - $xfer += $output->writeString($iter860); + $xfer += $output->writeString($iter869); } } $output->writeListEnd(); @@ -16068,14 +16284,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size861 = 0; - $_etype864 = 0; - $xfer += $input->readListBegin($_etype864, $_size861); - for ($_i865 = 0; $_i865 < $_size861; ++$_i865) + $_size870 = 0; + $_etype873 = 0; + $xfer += 
$input->readListBegin($_etype873, $_size870); + for ($_i874 = 0; $_i874 < $_size870; ++$_i874) { - $elem866 = null; - $xfer += $input->readString($elem866); - $this->success []= $elem866; + $elem875 = null; + $xfer += $input->readString($elem875); + $this->success []= $elem875; } $xfer += $input->readListEnd(); } else { @@ -16111,9 +16327,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter867) + foreach ($this->success as $iter876) { - $xfer += $output->writeString($iter867); + $xfer += $output->writeString($iter876); } } $output->writeListEnd(); @@ -17114,18 +17330,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size868 = 0; - $_ktype869 = 0; - $_vtype870 = 0; - $xfer += $input->readMapBegin($_ktype869, $_vtype870, $_size868); - for ($_i872 = 0; $_i872 < $_size868; ++$_i872) + $_size877 = 0; + $_ktype878 = 0; + $_vtype879 = 0; + $xfer += $input->readMapBegin($_ktype878, $_vtype879, $_size877); + for ($_i881 = 0; $_i881 < $_size877; ++$_i881) { - $key873 = ''; - $val874 = new \metastore\Type(); - $xfer += $input->readString($key873); - $val874 = new \metastore\Type(); - $xfer += $val874->read($input); - $this->success[$key873] = $val874; + $key882 = ''; + $val883 = new \metastore\Type(); + $xfer += $input->readString($key882); + $val883 = new \metastore\Type(); + $xfer += $val883->read($input); + $this->success[$key882] = $val883; } $xfer += $input->readMapEnd(); } else { @@ -17161,10 +17377,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter875 => $viter876) + foreach ($this->success as $kiter884 => $viter885) { - $xfer += $output->writeString($kiter875); - $xfer += $viter876->write($output); + $xfer += $output->writeString($kiter884); + $xfer += 
$viter885->write($output); } } $output->writeMapEnd(); @@ -17368,15 +17584,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size877 = 0; - $_etype880 = 0; - $xfer += $input->readListBegin($_etype880, $_size877); - for ($_i881 = 0; $_i881 < $_size877; ++$_i881) + $_size886 = 0; + $_etype889 = 0; + $xfer += $input->readListBegin($_etype889, $_size886); + for ($_i890 = 0; $_i890 < $_size886; ++$_i890) { - $elem882 = null; - $elem882 = new \metastore\FieldSchema(); - $xfer += $elem882->read($input); - $this->success []= $elem882; + $elem891 = null; + $elem891 = new \metastore\FieldSchema(); + $xfer += $elem891->read($input); + $this->success []= $elem891; } $xfer += $input->readListEnd(); } else { @@ -17428,9 +17644,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter883) + foreach ($this->success as $iter892) { - $xfer += $iter883->write($output); + $xfer += $iter892->write($output); } } $output->writeListEnd(); @@ -17672,15 +17888,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size884 = 0; - $_etype887 = 0; - $xfer += $input->readListBegin($_etype887, $_size884); - for ($_i888 = 0; $_i888 < $_size884; ++$_i888) + $_size893 = 0; + $_etype896 = 0; + $xfer += $input->readListBegin($_etype896, $_size893); + for ($_i897 = 0; $_i897 < $_size893; ++$_i897) { - $elem889 = null; - $elem889 = new \metastore\FieldSchema(); - $xfer += $elem889->read($input); - $this->success []= $elem889; + $elem898 = null; + $elem898 = new \metastore\FieldSchema(); + $xfer += $elem898->read($input); + $this->success []= $elem898; } $xfer += $input->readListEnd(); } else { @@ -17732,9 +17948,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - 
foreach ($this->success as $iter890) + foreach ($this->success as $iter899) { - $xfer += $iter890->write($output); + $xfer += $iter899->write($output); } } $output->writeListEnd(); @@ -17948,15 +18164,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size891 = 0; - $_etype894 = 0; - $xfer += $input->readListBegin($_etype894, $_size891); - for ($_i895 = 0; $_i895 < $_size891; ++$_i895) + $_size900 = 0; + $_etype903 = 0; + $xfer += $input->readListBegin($_etype903, $_size900); + for ($_i904 = 0; $_i904 < $_size900; ++$_i904) { - $elem896 = null; - $elem896 = new \metastore\FieldSchema(); - $xfer += $elem896->read($input); - $this->success []= $elem896; + $elem905 = null; + $elem905 = new \metastore\FieldSchema(); + $xfer += $elem905->read($input); + $this->success []= $elem905; } $xfer += $input->readListEnd(); } else { @@ -18008,9 +18224,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter897) + foreach ($this->success as $iter906) { - $xfer += $iter897->write($output); + $xfer += $iter906->write($output); } } $output->writeListEnd(); @@ -18252,15 +18468,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size898 = 0; - $_etype901 = 0; - $xfer += $input->readListBegin($_etype901, $_size898); - for ($_i902 = 0; $_i902 < $_size898; ++$_i902) + $_size907 = 0; + $_etype910 = 0; + $xfer += $input->readListBegin($_etype910, $_size907); + for ($_i911 = 0; $_i911 < $_size907; ++$_i911) { - $elem903 = null; - $elem903 = new \metastore\FieldSchema(); - $xfer += $elem903->read($input); - $this->success []= $elem903; + $elem912 = null; + $elem912 = new \metastore\FieldSchema(); + $xfer += $elem912->read($input); + $this->success []= $elem912; } $xfer += $input->readListEnd(); } else { @@ -18312,9 +18528,9 @@ class 
ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter904) + foreach ($this->success as $iter913) { - $xfer += $iter904->write($output); + $xfer += $iter913->write($output); } } $output->writeListEnd(); @@ -18986,15 +19202,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size905 = 0; - $_etype908 = 0; - $xfer += $input->readListBegin($_etype908, $_size905); - for ($_i909 = 0; $_i909 < $_size905; ++$_i909) + $_size914 = 0; + $_etype917 = 0; + $xfer += $input->readListBegin($_etype917, $_size914); + for ($_i918 = 0; $_i918 < $_size914; ++$_i918) { - $elem910 = null; - $elem910 = new \metastore\SQLPrimaryKey(); - $xfer += $elem910->read($input); - $this->primaryKeys []= $elem910; + $elem919 = null; + $elem919 = new \metastore\SQLPrimaryKey(); + $xfer += $elem919->read($input); + $this->primaryKeys []= $elem919; } $xfer += $input->readListEnd(); } else { @@ -19004,15 +19220,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size911 = 0; - $_etype914 = 0; - $xfer += $input->readListBegin($_etype914, $_size911); - for ($_i915 = 0; $_i915 < $_size911; ++$_i915) + $_size920 = 0; + $_etype923 = 0; + $xfer += $input->readListBegin($_etype923, $_size920); + for ($_i924 = 0; $_i924 < $_size920; ++$_i924) { - $elem916 = null; - $elem916 = new \metastore\SQLForeignKey(); - $xfer += $elem916->read($input); - $this->foreignKeys []= $elem916; + $elem925 = null; + $elem925 = new \metastore\SQLForeignKey(); + $xfer += $elem925->read($input); + $this->foreignKeys []= $elem925; } $xfer += $input->readListEnd(); } else { @@ -19022,15 +19238,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size917 = 0; - 
$_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); - for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size926 = 0; + $_etype929 = 0; + $xfer += $input->readListBegin($_etype929, $_size926); + for ($_i930 = 0; $_i930 < $_size926; ++$_i930) { - $elem922 = null; - $elem922 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem922->read($input); - $this->uniqueConstraints []= $elem922; + $elem931 = null; + $elem931 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem931->read($input); + $this->uniqueConstraints []= $elem931; } $xfer += $input->readListEnd(); } else { @@ -19040,15 +19256,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size923 = 0; - $_etype926 = 0; - $xfer += $input->readListBegin($_etype926, $_size923); - for ($_i927 = 0; $_i927 < $_size923; ++$_i927) + $_size932 = 0; + $_etype935 = 0; + $xfer += $input->readListBegin($_etype935, $_size932); + for ($_i936 = 0; $_i936 < $_size932; ++$_i936) { - $elem928 = null; - $elem928 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem928->read($input); - $this->notNullConstraints []= $elem928; + $elem937 = null; + $elem937 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem937->read($input); + $this->notNullConstraints []= $elem937; } $xfer += $input->readListEnd(); } else { @@ -19058,15 +19274,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size929 = 0; - $_etype932 = 0; - $xfer += $input->readListBegin($_etype932, $_size929); - for ($_i933 = 0; $_i933 < $_size929; ++$_i933) + $_size938 = 0; + $_etype941 = 0; + $xfer += $input->readListBegin($_etype941, $_size938); + for ($_i942 = 0; $_i942 < $_size938; ++$_i942) { - $elem934 = null; - $elem934 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem934->read($input); - $this->defaultConstraints []= $elem934; + $elem943 = null; + 
$elem943 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem943->read($input); + $this->defaultConstraints []= $elem943; } $xfer += $input->readListEnd(); } else { @@ -19076,15 +19292,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size935 = 0; - $_etype938 = 0; - $xfer += $input->readListBegin($_etype938, $_size935); - for ($_i939 = 0; $_i939 < $_size935; ++$_i939) + $_size944 = 0; + $_etype947 = 0; + $xfer += $input->readListBegin($_etype947, $_size944); + for ($_i948 = 0; $_i948 < $_size944; ++$_i948) { - $elem940 = null; - $elem940 = new \metastore\SQLCheckConstraint(); - $xfer += $elem940->read($input); - $this->checkConstraints []= $elem940; + $elem949 = null; + $elem949 = new \metastore\SQLCheckConstraint(); + $xfer += $elem949->read($input); + $this->checkConstraints []= $elem949; } $xfer += $input->readListEnd(); } else { @@ -19120,9 +19336,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter941) + foreach ($this->primaryKeys as $iter950) { - $xfer += $iter941->write($output); + $xfer += $iter950->write($output); } } $output->writeListEnd(); @@ -19137,9 +19353,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter942) + foreach ($this->foreignKeys as $iter951) { - $xfer += $iter942->write($output); + $xfer += $iter951->write($output); } } $output->writeListEnd(); @@ -19154,9 +19370,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter943) + foreach ($this->uniqueConstraints as $iter952) { - $xfer += $iter943->write($output); + $xfer += $iter952->write($output); } } 
$output->writeListEnd(); @@ -19171,9 +19387,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter944) + foreach ($this->notNullConstraints as $iter953) { - $xfer += $iter944->write($output); + $xfer += $iter953->write($output); } } $output->writeListEnd(); @@ -19188,9 +19404,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter945) + foreach ($this->defaultConstraints as $iter954) { - $xfer += $iter945->write($output); + $xfer += $iter954->write($output); } } $output->writeListEnd(); @@ -19205,9 +19421,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter946) + foreach ($this->checkConstraints as $iter955) { - $xfer += $iter946->write($output); + $xfer += $iter955->write($output); } } $output->writeListEnd(); @@ -21207,14 +21423,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size947 = 0; - $_etype950 = 0; - $xfer += $input->readListBegin($_etype950, $_size947); - for ($_i951 = 0; $_i951 < $_size947; ++$_i951) + $_size956 = 0; + $_etype959 = 0; + $xfer += $input->readListBegin($_etype959, $_size956); + for ($_i960 = 0; $_i960 < $_size956; ++$_i960) { - $elem952 = null; - $xfer += $input->readString($elem952); - $this->partNames []= $elem952; + $elem961 = null; + $xfer += $input->readString($elem961); + $this->partNames []= $elem961; } $xfer += $input->readListEnd(); } else { @@ -21252,9 +21468,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter953) + foreach ($this->partNames as 
$iter962) { - $xfer += $output->writeString($iter953); + $xfer += $output->writeString($iter962); } } $output->writeListEnd(); @@ -21690,14 +21906,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size954 = 0; - $_etype957 = 0; - $xfer += $input->readListBegin($_etype957, $_size954); - for ($_i958 = 0; $_i958 < $_size954; ++$_i958) + $_size963 = 0; + $_etype966 = 0; + $xfer += $input->readListBegin($_etype966, $_size963); + for ($_i967 = 0; $_i967 < $_size963; ++$_i967) { - $elem959 = null; - $xfer += $input->readString($elem959); - $this->success []= $elem959; + $elem968 = null; + $xfer += $input->readString($elem968); + $this->success []= $elem968; } $xfer += $input->readListEnd(); } else { @@ -21733,9 +21949,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter960) + foreach ($this->success as $iter969) { - $xfer += $output->writeString($iter960); + $xfer += $output->writeString($iter969); } } $output->writeListEnd(); @@ -21937,14 +22153,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size961 = 0; - $_etype964 = 0; - $xfer += $input->readListBegin($_etype964, $_size961); - for ($_i965 = 0; $_i965 < $_size961; ++$_i965) + $_size970 = 0; + $_etype973 = 0; + $xfer += $input->readListBegin($_etype973, $_size970); + for ($_i974 = 0; $_i974 < $_size970; ++$_i974) { - $elem966 = null; - $xfer += $input->readString($elem966); - $this->success []= $elem966; + $elem975 = null; + $xfer += $input->readString($elem975); + $this->success []= $elem975; } $xfer += $input->readListEnd(); } else { @@ -21980,9 +22196,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter967) + foreach ($this->success as $iter976) { - $xfer += 
$output->writeString($iter967); + $xfer += $output->writeString($iter976); } } $output->writeListEnd(); @@ -22138,14 +22354,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size968 = 0; - $_etype971 = 0; - $xfer += $input->readListBegin($_etype971, $_size968); - for ($_i972 = 0; $_i972 < $_size968; ++$_i972) + $_size977 = 0; + $_etype980 = 0; + $xfer += $input->readListBegin($_etype980, $_size977); + for ($_i981 = 0; $_i981 < $_size977; ++$_i981) { - $elem973 = null; - $xfer += $input->readString($elem973); - $this->success []= $elem973; + $elem982 = null; + $xfer += $input->readString($elem982); + $this->success []= $elem982; } $xfer += $input->readListEnd(); } else { @@ -22181,9 +22397,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter974) + foreach ($this->success as $iter983) { - $xfer += $output->writeString($iter974); + $xfer += $output->writeString($iter983); } } $output->writeListEnd(); @@ -22288,14 +22504,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size975 = 0; - $_etype978 = 0; - $xfer += $input->readListBegin($_etype978, $_size975); - for ($_i979 = 0; $_i979 < $_size975; ++$_i979) + $_size984 = 0; + $_etype987 = 0; + $xfer += $input->readListBegin($_etype987, $_size984); + for ($_i988 = 0; $_i988 < $_size984; ++$_i988) { - $elem980 = null; - $xfer += $input->readString($elem980); - $this->tbl_types []= $elem980; + $elem989 = null; + $xfer += $input->readString($elem989); + $this->tbl_types []= $elem989; } $xfer += $input->readListEnd(); } else { @@ -22333,9 +22549,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter981) + foreach ($this->tbl_types as 
$iter990) { - $xfer += $output->writeString($iter981); + $xfer += $output->writeString($iter990); } } $output->writeListEnd(); @@ -22412,15 +22628,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size982 = 0; - $_etype985 = 0; - $xfer += $input->readListBegin($_etype985, $_size982); - for ($_i986 = 0; $_i986 < $_size982; ++$_i986) + $_size991 = 0; + $_etype994 = 0; + $xfer += $input->readListBegin($_etype994, $_size991); + for ($_i995 = 0; $_i995 < $_size991; ++$_i995) { - $elem987 = null; - $elem987 = new \metastore\TableMeta(); - $xfer += $elem987->read($input); - $this->success []= $elem987; + $elem996 = null; + $elem996 = new \metastore\TableMeta(); + $xfer += $elem996->read($input); + $this->success []= $elem996; } $xfer += $input->readListEnd(); } else { @@ -22456,9 +22672,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter988) + foreach ($this->success as $iter997) { - $xfer += $iter988->write($output); + $xfer += $iter997->write($output); } } $output->writeListEnd(); @@ -22614,14 +22830,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size989 = 0; - $_etype992 = 0; - $xfer += $input->readListBegin($_etype992, $_size989); - for ($_i993 = 0; $_i993 < $_size989; ++$_i993) + $_size998 = 0; + $_etype1001 = 0; + $xfer += $input->readListBegin($_etype1001, $_size998); + for ($_i1002 = 0; $_i1002 < $_size998; ++$_i1002) { - $elem994 = null; - $xfer += $input->readString($elem994); - $this->success []= $elem994; + $elem1003 = null; + $xfer += $input->readString($elem1003); + $this->success []= $elem1003; } $xfer += $input->readListEnd(); } else { @@ -22657,9 +22873,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as 
$iter995) + foreach ($this->success as $iter1004) { - $xfer += $output->writeString($iter995); + $xfer += $output->writeString($iter1004); } } $output->writeListEnd(); @@ -22974,14 +23190,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size996 = 0; - $_etype999 = 0; - $xfer += $input->readListBegin($_etype999, $_size996); - for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000) + $_size1005 = 0; + $_etype1008 = 0; + $xfer += $input->readListBegin($_etype1008, $_size1005); + for ($_i1009 = 0; $_i1009 < $_size1005; ++$_i1009) { - $elem1001 = null; - $xfer += $input->readString($elem1001); - $this->tbl_names []= $elem1001; + $elem1010 = null; + $xfer += $input->readString($elem1010); + $this->tbl_names []= $elem1010; } $xfer += $input->readListEnd(); } else { @@ -23014,9 +23230,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1002) + foreach ($this->tbl_names as $iter1011) { - $xfer += $output->writeString($iter1002); + $xfer += $output->writeString($iter1011); } } $output->writeListEnd(); @@ -23081,15 +23297,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1003 = 0; - $_etype1006 = 0; - $xfer += $input->readListBegin($_etype1006, $_size1003); - for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) + $_size1012 = 0; + $_etype1015 = 0; + $xfer += $input->readListBegin($_etype1015, $_size1012); + for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) { - $elem1008 = null; - $elem1008 = new \metastore\Table(); - $xfer += $elem1008->read($input); - $this->success []= $elem1008; + $elem1017 = null; + $elem1017 = new \metastore\Table(); + $xfer += $elem1017->read($input); + $this->success []= $elem1017; } $xfer += $input->readListEnd(); } else { @@ -23117,9 +23333,9 @@ class 
ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1009) + foreach ($this->success as $iter1018) { - $xfer += $iter1009->write($output); + $xfer += $iter1018->write($output); } } $output->writeListEnd(); @@ -24319,14 +24535,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1010 = 0; - $_etype1013 = 0; - $xfer += $input->readListBegin($_etype1013, $_size1010); - for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014) + $_size1019 = 0; + $_etype1022 = 0; + $xfer += $input->readListBegin($_etype1022, $_size1019); + for ($_i1023 = 0; $_i1023 < $_size1019; ++$_i1023) { - $elem1015 = null; - $xfer += $input->readString($elem1015); - $this->success []= $elem1015; + $elem1024 = null; + $xfer += $input->readString($elem1024); + $this->success []= $elem1024; } $xfer += $input->readListEnd(); } else { @@ -24378,9 +24594,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1016) + foreach ($this->success as $iter1025) { - $xfer += $output->writeString($iter1016); + $xfer += $output->writeString($iter1025); } } $output->writeListEnd(); @@ -25903,15 +26119,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1017 = 0; - $_etype1020 = 0; - $xfer += $input->readListBegin($_etype1020, $_size1017); - for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021) + $_size1026 = 0; + $_etype1029 = 0; + $xfer += $input->readListBegin($_etype1029, $_size1026); + for ($_i1030 = 0; $_i1030 < $_size1026; ++$_i1030) { - $elem1022 = null; - $elem1022 = new \metastore\Partition(); - $xfer += $elem1022->read($input); - $this->new_parts []= $elem1022; + $elem1031 = null; + $elem1031 = new \metastore\Partition(); + $xfer += 
$elem1031->read($input); + $this->new_parts []= $elem1031; } $xfer += $input->readListEnd(); } else { @@ -25939,9 +26155,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1023) + foreach ($this->new_parts as $iter1032) { - $xfer += $iter1023->write($output); + $xfer += $iter1032->write($output); } } $output->writeListEnd(); @@ -26156,15 +26372,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1024 = 0; - $_etype1027 = 0; - $xfer += $input->readListBegin($_etype1027, $_size1024); - for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028) + $_size1033 = 0; + $_etype1036 = 0; + $xfer += $input->readListBegin($_etype1036, $_size1033); + for ($_i1037 = 0; $_i1037 < $_size1033; ++$_i1037) { - $elem1029 = null; - $elem1029 = new \metastore\PartitionSpec(); - $xfer += $elem1029->read($input); - $this->new_parts []= $elem1029; + $elem1038 = null; + $elem1038 = new \metastore\PartitionSpec(); + $xfer += $elem1038->read($input); + $this->new_parts []= $elem1038; } $xfer += $input->readListEnd(); } else { @@ -26192,9 +26408,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1030) + foreach ($this->new_parts as $iter1039) { - $xfer += $iter1030->write($output); + $xfer += $iter1039->write($output); } } $output->writeListEnd(); @@ -26444,14 +26660,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1031 = 0; - $_etype1034 = 0; - $xfer += $input->readListBegin($_etype1034, $_size1031); - for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035) + $_size1040 = 0; + $_etype1043 = 0; + $xfer += $input->readListBegin($_etype1043, $_size1040); + for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) { - $elem1036 = null; - $xfer 
+= $input->readString($elem1036); - $this->part_vals []= $elem1036; + $elem1045 = null; + $xfer += $input->readString($elem1045); + $this->part_vals []= $elem1045; } $xfer += $input->readListEnd(); } else { @@ -26489,9 +26705,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1037) + foreach ($this->part_vals as $iter1046) { - $xfer += $output->writeString($iter1037); + $xfer += $output->writeString($iter1046); } } $output->writeListEnd(); @@ -26993,14 +27209,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1038 = 0; - $_etype1041 = 0; - $xfer += $input->readListBegin($_etype1041, $_size1038); - for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042) + $_size1047 = 0; + $_etype1050 = 0; + $xfer += $input->readListBegin($_etype1050, $_size1047); + for ($_i1051 = 0; $_i1051 < $_size1047; ++$_i1051) { - $elem1043 = null; - $xfer += $input->readString($elem1043); - $this->part_vals []= $elem1043; + $elem1052 = null; + $xfer += $input->readString($elem1052); + $this->part_vals []= $elem1052; } $xfer += $input->readListEnd(); } else { @@ -27046,9 +27262,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1044) + foreach ($this->part_vals as $iter1053) { - $xfer += $output->writeString($iter1044); + $xfer += $output->writeString($iter1053); } } $output->writeListEnd(); @@ -27902,14 +28118,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1054 = 0; + $_etype1057 = 0; + $xfer += $input->readListBegin($_etype1057, 
$_size1054); + for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058) { - $elem1050 = null; - $xfer += $input->readString($elem1050); - $this->part_vals []= $elem1050; + $elem1059 = null; + $xfer += $input->readString($elem1059); + $this->part_vals []= $elem1059; } $xfer += $input->readListEnd(); } else { @@ -27954,9 +28170,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1051) + foreach ($this->part_vals as $iter1060) { - $xfer += $output->writeString($iter1051); + $xfer += $output->writeString($iter1060); } } $output->writeListEnd(); @@ -28209,14 +28425,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1052 = 0; - $_etype1055 = 0; - $xfer += $input->readListBegin($_etype1055, $_size1052); - for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) + $_size1061 = 0; + $_etype1064 = 0; + $xfer += $input->readListBegin($_etype1064, $_size1061); + for ($_i1065 = 0; $_i1065 < $_size1061; ++$_i1065) { - $elem1057 = null; - $xfer += $input->readString($elem1057); - $this->part_vals []= $elem1057; + $elem1066 = null; + $xfer += $input->readString($elem1066); + $this->part_vals []= $elem1066; } $xfer += $input->readListEnd(); } else { @@ -28269,9 +28485,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1058) + foreach ($this->part_vals as $iter1067) { - $xfer += $output->writeString($iter1058); + $xfer += $output->writeString($iter1067); } } $output->writeListEnd(); @@ -29285,14 +29501,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) 
+ $_size1068 = 0; + $_etype1071 = 0; + $xfer += $input->readListBegin($_etype1071, $_size1068); + for ($_i1072 = 0; $_i1072 < $_size1068; ++$_i1072) { - $elem1064 = null; - $xfer += $input->readString($elem1064); - $this->part_vals []= $elem1064; + $elem1073 = null; + $xfer += $input->readString($elem1073); + $this->part_vals []= $elem1073; } $xfer += $input->readListEnd(); } else { @@ -29330,9 +29546,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1065) + foreach ($this->part_vals as $iter1074) { - $xfer += $output->writeString($iter1065); + $xfer += $output->writeString($iter1074); } } $output->writeListEnd(); @@ -29574,17 +29790,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1066 = 0; - $_ktype1067 = 0; - $_vtype1068 = 0; - $xfer += $input->readMapBegin($_ktype1067, $_vtype1068, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1075 = 0; + $_ktype1076 = 0; + $_vtype1077 = 0; + $xfer += $input->readMapBegin($_ktype1076, $_vtype1077, $_size1075); + for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) { - $key1071 = ''; - $val1072 = ''; - $xfer += $input->readString($key1071); - $xfer += $input->readString($val1072); - $this->partitionSpecs[$key1071] = $val1072; + $key1080 = ''; + $val1081 = ''; + $xfer += $input->readString($key1080); + $xfer += $input->readString($val1081); + $this->partitionSpecs[$key1080] = $val1081; } $xfer += $input->readMapEnd(); } else { @@ -29640,10 +29856,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1073 => $viter1074) + foreach ($this->partitionSpecs as $kiter1082 => $viter1083) { - $xfer += $output->writeString($kiter1073); - $xfer += $output->writeString($viter1074); + $xfer += 
$output->writeString($kiter1082); + $xfer += $output->writeString($viter1083); } } $output->writeMapEnd(); @@ -29955,17 +30171,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1075 = 0; - $_ktype1076 = 0; - $_vtype1077 = 0; - $xfer += $input->readMapBegin($_ktype1076, $_vtype1077, $_size1075); - for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) + $_size1084 = 0; + $_ktype1085 = 0; + $_vtype1086 = 0; + $xfer += $input->readMapBegin($_ktype1085, $_vtype1086, $_size1084); + for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088) { - $key1080 = ''; - $val1081 = ''; - $xfer += $input->readString($key1080); - $xfer += $input->readString($val1081); - $this->partitionSpecs[$key1080] = $val1081; + $key1089 = ''; + $val1090 = ''; + $xfer += $input->readString($key1089); + $xfer += $input->readString($val1090); + $this->partitionSpecs[$key1089] = $val1090; } $xfer += $input->readMapEnd(); } else { @@ -30021,10 +30237,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1082 => $viter1083) + foreach ($this->partitionSpecs as $kiter1091 => $viter1092) { - $xfer += $output->writeString($kiter1082); - $xfer += $output->writeString($viter1083); + $xfer += $output->writeString($kiter1091); + $xfer += $output->writeString($viter1092); } } $output->writeMapEnd(); @@ -30157,15 +30373,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1084 = 0; - $_etype1087 = 0; - $xfer += $input->readListBegin($_etype1087, $_size1084); - for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088) + $_size1093 = 0; + $_etype1096 = 0; + $xfer += $input->readListBegin($_etype1096, $_size1093); + for ($_i1097 = 0; $_i1097 < $_size1093; ++$_i1097) { - $elem1089 = null; - $elem1089 = new \metastore\Partition(); - $xfer += 
$elem1089->read($input); - $this->success []= $elem1089; + $elem1098 = null; + $elem1098 = new \metastore\Partition(); + $xfer += $elem1098->read($input); + $this->success []= $elem1098; } $xfer += $input->readListEnd(); } else { @@ -30225,9 +30441,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1090) + foreach ($this->success as $iter1099) { - $xfer += $iter1090->write($output); + $xfer += $iter1099->write($output); } } $output->writeListEnd(); @@ -30373,14 +30589,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1091 = 0; - $_etype1094 = 0; - $xfer += $input->readListBegin($_etype1094, $_size1091); - for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095) + $_size1100 = 0; + $_etype1103 = 0; + $xfer += $input->readListBegin($_etype1103, $_size1100); + for ($_i1104 = 0; $_i1104 < $_size1100; ++$_i1104) { - $elem1096 = null; - $xfer += $input->readString($elem1096); - $this->part_vals []= $elem1096; + $elem1105 = null; + $xfer += $input->readString($elem1105); + $this->part_vals []= $elem1105; } $xfer += $input->readListEnd(); } else { @@ -30397,14 +30613,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1097 = 0; - $_etype1100 = 0; - $xfer += $input->readListBegin($_etype1100, $_size1097); - for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101) + $_size1106 = 0; + $_etype1109 = 0; + $xfer += $input->readListBegin($_etype1109, $_size1106); + for ($_i1110 = 0; $_i1110 < $_size1106; ++$_i1110) { - $elem1102 = null; - $xfer += $input->readString($elem1102); - $this->group_names []= $elem1102; + $elem1111 = null; + $xfer += $input->readString($elem1111); + $this->group_names []= $elem1111; } $xfer += $input->readListEnd(); } else { @@ -30442,9 +30658,9 @@ class 
ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1103) + foreach ($this->part_vals as $iter1112) { - $xfer += $output->writeString($iter1103); + $xfer += $output->writeString($iter1112); } } $output->writeListEnd(); @@ -30464,9 +30680,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1104) + foreach ($this->group_names as $iter1113) { - $xfer += $output->writeString($iter1104); + $xfer += $output->writeString($iter1113); } } $output->writeListEnd(); @@ -31057,15 +31273,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1105 = 0; - $_etype1108 = 0; - $xfer += $input->readListBegin($_etype1108, $_size1105); - for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109) + $_size1114 = 0; + $_etype1117 = 0; + $xfer += $input->readListBegin($_etype1117, $_size1114); + for ($_i1118 = 0; $_i1118 < $_size1114; ++$_i1118) { - $elem1110 = null; - $elem1110 = new \metastore\Partition(); - $xfer += $elem1110->read($input); - $this->success []= $elem1110; + $elem1119 = null; + $elem1119 = new \metastore\Partition(); + $xfer += $elem1119->read($input); + $this->success []= $elem1119; } $xfer += $input->readListEnd(); } else { @@ -31109,9 +31325,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1111) + foreach ($this->success as $iter1120) { - $xfer += $iter1111->write($output); + $xfer += $iter1120->write($output); } } $output->writeListEnd(); @@ -31257,14 +31473,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1112 = 0; - $_etype1115 = 0; - $xfer += $input->readListBegin($_etype1115, $_size1112); - 
for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116) + $_size1121 = 0; + $_etype1124 = 0; + $xfer += $input->readListBegin($_etype1124, $_size1121); + for ($_i1125 = 0; $_i1125 < $_size1121; ++$_i1125) { - $elem1117 = null; - $xfer += $input->readString($elem1117); - $this->group_names []= $elem1117; + $elem1126 = null; + $xfer += $input->readString($elem1126); + $this->group_names []= $elem1126; } $xfer += $input->readListEnd(); } else { @@ -31312,9 +31528,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1118) + foreach ($this->group_names as $iter1127) { - $xfer += $output->writeString($iter1118); + $xfer += $output->writeString($iter1127); } } $output->writeListEnd(); @@ -31403,15 +31619,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1119 = 0; - $_etype1122 = 0; - $xfer += $input->readListBegin($_etype1122, $_size1119); - for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123) + $_size1128 = 0; + $_etype1131 = 0; + $xfer += $input->readListBegin($_etype1131, $_size1128); + for ($_i1132 = 0; $_i1132 < $_size1128; ++$_i1132) { - $elem1124 = null; - $elem1124 = new \metastore\Partition(); - $xfer += $elem1124->read($input); - $this->success []= $elem1124; + $elem1133 = null; + $elem1133 = new \metastore\Partition(); + $xfer += $elem1133->read($input); + $this->success []= $elem1133; } $xfer += $input->readListEnd(); } else { @@ -31455,9 +31671,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1125) + foreach ($this->success as $iter1134) { - $xfer += $iter1125->write($output); + $xfer += $iter1134->write($output); } } $output->writeListEnd(); @@ -31677,15 +31893,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == 
TType::LST) { $this->success = array(); - $_size1126 = 0; - $_etype1129 = 0; - $xfer += $input->readListBegin($_etype1129, $_size1126); - for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130) + $_size1135 = 0; + $_etype1138 = 0; + $xfer += $input->readListBegin($_etype1138, $_size1135); + for ($_i1139 = 0; $_i1139 < $_size1135; ++$_i1139) { - $elem1131 = null; - $elem1131 = new \metastore\PartitionSpec(); - $xfer += $elem1131->read($input); - $this->success []= $elem1131; + $elem1140 = null; + $elem1140 = new \metastore\PartitionSpec(); + $xfer += $elem1140->read($input); + $this->success []= $elem1140; } $xfer += $input->readListEnd(); } else { @@ -31729,9 +31945,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1132) + foreach ($this->success as $iter1141) { - $xfer += $iter1132->write($output); + $xfer += $iter1141->write($output); } } $output->writeListEnd(); @@ -31950,14 +32166,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1133 = 0; - $_etype1136 = 0; - $xfer += $input->readListBegin($_etype1136, $_size1133); - for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137) + $_size1142 = 0; + $_etype1145 = 0; + $xfer += $input->readListBegin($_etype1145, $_size1142); + for ($_i1146 = 0; $_i1146 < $_size1142; ++$_i1146) { - $elem1138 = null; - $xfer += $input->readString($elem1138); - $this->success []= $elem1138; + $elem1147 = null; + $xfer += $input->readString($elem1147); + $this->success []= $elem1147; } $xfer += $input->readListEnd(); } else { @@ -32001,9 +32217,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1139) + foreach ($this->success as $iter1148) { - $xfer += $output->writeString($iter1139); + $xfer += $output->writeString($iter1148); } } 
$output->writeListEnd(); @@ -32334,14 +32550,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1140 = 0; - $_etype1143 = 0; - $xfer += $input->readListBegin($_etype1143, $_size1140); - for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144) + $_size1149 = 0; + $_etype1152 = 0; + $xfer += $input->readListBegin($_etype1152, $_size1149); + for ($_i1153 = 0; $_i1153 < $_size1149; ++$_i1153) { - $elem1145 = null; - $xfer += $input->readString($elem1145); - $this->part_vals []= $elem1145; + $elem1154 = null; + $xfer += $input->readString($elem1154); + $this->part_vals []= $elem1154; } $xfer += $input->readListEnd(); } else { @@ -32386,9 +32602,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1146) + foreach ($this->part_vals as $iter1155) { - $xfer += $output->writeString($iter1146); + $xfer += $output->writeString($iter1155); } } $output->writeListEnd(); @@ -32482,15 +32698,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1147 = 0; - $_etype1150 = 0; - $xfer += $input->readListBegin($_etype1150, $_size1147); - for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151) + $_size1156 = 0; + $_etype1159 = 0; + $xfer += $input->readListBegin($_etype1159, $_size1156); + for ($_i1160 = 0; $_i1160 < $_size1156; ++$_i1160) { - $elem1152 = null; - $elem1152 = new \metastore\Partition(); - $xfer += $elem1152->read($input); - $this->success []= $elem1152; + $elem1161 = null; + $elem1161 = new \metastore\Partition(); + $xfer += $elem1161->read($input); + $this->success []= $elem1161; } $xfer += $input->readListEnd(); } else { @@ -32534,9 +32750,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1153) + foreach 
($this->success as $iter1162) { - $xfer += $iter1153->write($output); + $xfer += $iter1162->write($output); } } $output->writeListEnd(); @@ -32683,14 +32899,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1154 = 0; - $_etype1157 = 0; - $xfer += $input->readListBegin($_etype1157, $_size1154); - for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158) + $_size1163 = 0; + $_etype1166 = 0; + $xfer += $input->readListBegin($_etype1166, $_size1163); + for ($_i1167 = 0; $_i1167 < $_size1163; ++$_i1167) { - $elem1159 = null; - $xfer += $input->readString($elem1159); - $this->part_vals []= $elem1159; + $elem1168 = null; + $xfer += $input->readString($elem1168); + $this->part_vals []= $elem1168; } $xfer += $input->readListEnd(); } else { @@ -32714,14 +32930,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1160 = 0; - $_etype1163 = 0; - $xfer += $input->readListBegin($_etype1163, $_size1160); - for ($_i1164 = 0; $_i1164 < $_size1160; ++$_i1164) + $_size1169 = 0; + $_etype1172 = 0; + $xfer += $input->readListBegin($_etype1172, $_size1169); + for ($_i1173 = 0; $_i1173 < $_size1169; ++$_i1173) { - $elem1165 = null; - $xfer += $input->readString($elem1165); - $this->group_names []= $elem1165; + $elem1174 = null; + $xfer += $input->readString($elem1174); + $this->group_names []= $elem1174; } $xfer += $input->readListEnd(); } else { @@ -32759,9 +32975,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1166) + foreach ($this->part_vals as $iter1175) { - $xfer += $output->writeString($iter1166); + $xfer += $output->writeString($iter1175); } } $output->writeListEnd(); @@ -32786,9 +33002,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, 
count($this->group_names)); { - foreach ($this->group_names as $iter1167) + foreach ($this->group_names as $iter1176) { - $xfer += $output->writeString($iter1167); + $xfer += $output->writeString($iter1176); } } $output->writeListEnd(); @@ -32877,15 +33093,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1168 = 0; - $_etype1171 = 0; - $xfer += $input->readListBegin($_etype1171, $_size1168); - for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) + $_size1177 = 0; + $_etype1180 = 0; + $xfer += $input->readListBegin($_etype1180, $_size1177); + for ($_i1181 = 0; $_i1181 < $_size1177; ++$_i1181) { - $elem1173 = null; - $elem1173 = new \metastore\Partition(); - $xfer += $elem1173->read($input); - $this->success []= $elem1173; + $elem1182 = null; + $elem1182 = new \metastore\Partition(); + $xfer += $elem1182->read($input); + $this->success []= $elem1182; } $xfer += $input->readListEnd(); } else { @@ -32929,9 +33145,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1174) + foreach ($this->success as $iter1183) { - $xfer += $iter1174->write($output); + $xfer += $iter1183->write($output); } } $output->writeListEnd(); @@ -33052,14 +33268,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1175 = 0; - $_etype1178 = 0; - $xfer += $input->readListBegin($_etype1178, $_size1175); - for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179) + $_size1184 = 0; + $_etype1187 = 0; + $xfer += $input->readListBegin($_etype1187, $_size1184); + for ($_i1188 = 0; $_i1188 < $_size1184; ++$_i1188) { - $elem1180 = null; - $xfer += $input->readString($elem1180); - $this->part_vals []= $elem1180; + $elem1189 = null; + $xfer += $input->readString($elem1189); + $this->part_vals []= $elem1189; } $xfer += 
$input->readListEnd(); } else { @@ -33104,9 +33320,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1181) + foreach ($this->part_vals as $iter1190) { - $xfer += $output->writeString($iter1181); + $xfer += $output->writeString($iter1190); } } $output->writeListEnd(); @@ -33199,14 +33415,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1182 = 0; - $_etype1185 = 0; - $xfer += $input->readListBegin($_etype1185, $_size1182); - for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186) + $_size1191 = 0; + $_etype1194 = 0; + $xfer += $input->readListBegin($_etype1194, $_size1191); + for ($_i1195 = 0; $_i1195 < $_size1191; ++$_i1195) { - $elem1187 = null; - $xfer += $input->readString($elem1187); - $this->success []= $elem1187; + $elem1196 = null; + $xfer += $input->readString($elem1196); + $this->success []= $elem1196; } $xfer += $input->readListEnd(); } else { @@ -33250,9 +33466,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1188) + foreach ($this->success as $iter1197) { - $xfer += $output->writeString($iter1188); + $xfer += $output->writeString($iter1197); } } $output->writeListEnd(); @@ -33495,15 +33711,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1189 = 0; - $_etype1192 = 0; - $xfer += $input->readListBegin($_etype1192, $_size1189); - for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193) + $_size1198 = 0; + $_etype1201 = 0; + $xfer += $input->readListBegin($_etype1201, $_size1198); + for ($_i1202 = 0; $_i1202 < $_size1198; ++$_i1202) { - $elem1194 = null; - $elem1194 = new \metastore\Partition(); - $xfer += $elem1194->read($input); - $this->success []= $elem1194; + $elem1203 
= null; + $elem1203 = new \metastore\Partition(); + $xfer += $elem1203->read($input); + $this->success []= $elem1203; } $xfer += $input->readListEnd(); } else { @@ -33547,9 +33763,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1195) + foreach ($this->success as $iter1204) { - $xfer += $iter1195->write($output); + $xfer += $iter1204->write($output); } } $output->writeListEnd(); @@ -33792,15 +34008,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1196 = 0; - $_etype1199 = 0; - $xfer += $input->readListBegin($_etype1199, $_size1196); - for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) + $_size1205 = 0; + $_etype1208 = 0; + $xfer += $input->readListBegin($_etype1208, $_size1205); + for ($_i1209 = 0; $_i1209 < $_size1205; ++$_i1209) { - $elem1201 = null; - $elem1201 = new \metastore\PartitionSpec(); - $xfer += $elem1201->read($input); - $this->success []= $elem1201; + $elem1210 = null; + $elem1210 = new \metastore\PartitionSpec(); + $xfer += $elem1210->read($input); + $this->success []= $elem1210; } $xfer += $input->readListEnd(); } else { @@ -33844,9 +34060,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1202) + foreach ($this->success as $iter1211) { - $xfer += $iter1202->write($output); + $xfer += $iter1211->write($output); } } $output->writeListEnd(); @@ -34412,14 +34628,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1203 = 0; - $_etype1206 = 0; - $xfer += $input->readListBegin($_etype1206, $_size1203); - for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) + $_size1212 = 0; + $_etype1215 = 0; + $xfer += $input->readListBegin($_etype1215, $_size1212); + for ($_i1216 
= 0; $_i1216 < $_size1212; ++$_i1216) { - $elem1208 = null; - $xfer += $input->readString($elem1208); - $this->names []= $elem1208; + $elem1217 = null; + $xfer += $input->readString($elem1217); + $this->names []= $elem1217; } $xfer += $input->readListEnd(); } else { @@ -34457,9 +34673,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1209) + foreach ($this->names as $iter1218) { - $xfer += $output->writeString($iter1209); + $xfer += $output->writeString($iter1218); } } $output->writeListEnd(); @@ -34548,15 +34764,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1210 = 0; - $_etype1213 = 0; - $xfer += $input->readListBegin($_etype1213, $_size1210); - for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) + $_size1219 = 0; + $_etype1222 = 0; + $xfer += $input->readListBegin($_etype1222, $_size1219); + for ($_i1223 = 0; $_i1223 < $_size1219; ++$_i1223) { - $elem1215 = null; - $elem1215 = new \metastore\Partition(); - $xfer += $elem1215->read($input); - $this->success []= $elem1215; + $elem1224 = null; + $elem1224 = new \metastore\Partition(); + $xfer += $elem1224->read($input); + $this->success []= $elem1224; } $xfer += $input->readListEnd(); } else { @@ -34600,9 +34816,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1216) + foreach ($this->success as $iter1225) { - $xfer += $iter1216->write($output); + $xfer += $iter1225->write($output); } } $output->writeListEnd(); @@ -34941,15 +35157,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1217 = 0; - $_etype1220 = 0; - $xfer += $input->readListBegin($_etype1220, $_size1217); - for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221) + $_size1226 = 
0; + $_etype1229 = 0; + $xfer += $input->readListBegin($_etype1229, $_size1226); + for ($_i1230 = 0; $_i1230 < $_size1226; ++$_i1230) { - $elem1222 = null; - $elem1222 = new \metastore\Partition(); - $xfer += $elem1222->read($input); - $this->new_parts []= $elem1222; + $elem1231 = null; + $elem1231 = new \metastore\Partition(); + $xfer += $elem1231->read($input); + $this->new_parts []= $elem1231; } $xfer += $input->readListEnd(); } else { @@ -34987,9 +35203,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1223) + foreach ($this->new_parts as $iter1232) { - $xfer += $iter1223->write($output); + $xfer += $iter1232->write($output); } } $output->writeListEnd(); @@ -35204,15 +35420,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1224 = 0; - $_etype1227 = 0; - $xfer += $input->readListBegin($_etype1227, $_size1224); - for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) + $_size1233 = 0; + $_etype1236 = 0; + $xfer += $input->readListBegin($_etype1236, $_size1233); + for ($_i1237 = 0; $_i1237 < $_size1233; ++$_i1237) { - $elem1229 = null; - $elem1229 = new \metastore\Partition(); - $xfer += $elem1229->read($input); - $this->new_parts []= $elem1229; + $elem1238 = null; + $elem1238 = new \metastore\Partition(); + $xfer += $elem1238->read($input); + $this->new_parts []= $elem1238; } $xfer += $input->readListEnd(); } else { @@ -35258,9 +35474,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1230) + foreach ($this->new_parts as $iter1239) { - $xfer += $iter1230->write($output); + $xfer += $iter1239->write($output); } } $output->writeListEnd(); @@ -35948,14 +36164,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if 
($ftype == TType::LST) { $this->part_vals = array(); - $_size1231 = 0; - $_etype1234 = 0; - $xfer += $input->readListBegin($_etype1234, $_size1231); - for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) + $_size1240 = 0; + $_etype1243 = 0; + $xfer += $input->readListBegin($_etype1243, $_size1240); + for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) { - $elem1236 = null; - $xfer += $input->readString($elem1236); - $this->part_vals []= $elem1236; + $elem1245 = null; + $xfer += $input->readString($elem1245); + $this->part_vals []= $elem1245; } $xfer += $input->readListEnd(); } else { @@ -36001,9 +36217,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1237) + foreach ($this->part_vals as $iter1246) { - $xfer += $output->writeString($iter1237); + $xfer += $output->writeString($iter1246); } } $output->writeListEnd(); @@ -36398,14 +36614,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1238 = 0; - $_etype1241 = 0; - $xfer += $input->readListBegin($_etype1241, $_size1238); - for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) + $_size1247 = 0; + $_etype1250 = 0; + $xfer += $input->readListBegin($_etype1250, $_size1247); + for ($_i1251 = 0; $_i1251 < $_size1247; ++$_i1251) { - $elem1243 = null; - $xfer += $input->readString($elem1243); - $this->part_vals []= $elem1243; + $elem1252 = null; + $xfer += $input->readString($elem1252); + $this->part_vals []= $elem1252; } $xfer += $input->readListEnd(); } else { @@ -36440,9 +36656,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1244) + foreach ($this->part_vals as $iter1253) { - $xfer += $output->writeString($iter1244); + $xfer += $output->writeString($iter1253); } } $output->writeListEnd(); @@ 
-36896,14 +37112,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1245 = 0; - $_etype1248 = 0; - $xfer += $input->readListBegin($_etype1248, $_size1245); - for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) + $_size1254 = 0; + $_etype1257 = 0; + $xfer += $input->readListBegin($_etype1257, $_size1254); + for ($_i1258 = 0; $_i1258 < $_size1254; ++$_i1258) { - $elem1250 = null; - $xfer += $input->readString($elem1250); - $this->success []= $elem1250; + $elem1259 = null; + $xfer += $input->readString($elem1259); + $this->success []= $elem1259; } $xfer += $input->readListEnd(); } else { @@ -36939,9 +37155,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1251) + foreach ($this->success as $iter1260) { - $xfer += $output->writeString($iter1251); + $xfer += $output->writeString($iter1260); } } $output->writeListEnd(); @@ -37101,17 +37317,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1252 = 0; - $_ktype1253 = 0; - $_vtype1254 = 0; - $xfer += $input->readMapBegin($_ktype1253, $_vtype1254, $_size1252); - for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) + $_size1261 = 0; + $_ktype1262 = 0; + $_vtype1263 = 0; + $xfer += $input->readMapBegin($_ktype1262, $_vtype1263, $_size1261); + for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) { - $key1257 = ''; - $val1258 = ''; - $xfer += $input->readString($key1257); - $xfer += $input->readString($val1258); - $this->success[$key1257] = $val1258; + $key1266 = ''; + $val1267 = ''; + $xfer += $input->readString($key1266); + $xfer += $input->readString($val1267); + $this->success[$key1266] = $val1267; } $xfer += $input->readMapEnd(); } else { @@ -37147,10 +37363,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { 
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1259 => $viter1260) + foreach ($this->success as $kiter1268 => $viter1269) { - $xfer += $output->writeString($kiter1259); - $xfer += $output->writeString($viter1260); + $xfer += $output->writeString($kiter1268); + $xfer += $output->writeString($viter1269); } } $output->writeMapEnd(); @@ -37270,17 +37486,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1261 = 0; - $_ktype1262 = 0; - $_vtype1263 = 0; - $xfer += $input->readMapBegin($_ktype1262, $_vtype1263, $_size1261); - for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) + $_size1270 = 0; + $_ktype1271 = 0; + $_vtype1272 = 0; + $xfer += $input->readMapBegin($_ktype1271, $_vtype1272, $_size1270); + for ($_i1274 = 0; $_i1274 < $_size1270; ++$_i1274) { - $key1266 = ''; - $val1267 = ''; - $xfer += $input->readString($key1266); - $xfer += $input->readString($val1267); - $this->part_vals[$key1266] = $val1267; + $key1275 = ''; + $val1276 = ''; + $xfer += $input->readString($key1275); + $xfer += $input->readString($val1276); + $this->part_vals[$key1275] = $val1276; } $xfer += $input->readMapEnd(); } else { @@ -37325,10 +37541,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1268 => $viter1269) + foreach ($this->part_vals as $kiter1277 => $viter1278) { - $xfer += $output->writeString($kiter1268); - $xfer += $output->writeString($viter1269); + $xfer += $output->writeString($kiter1277); + $xfer += $output->writeString($viter1278); } } $output->writeMapEnd(); @@ -37650,17 +37866,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1270 = 0; - $_ktype1271 = 0; - $_vtype1272 = 0; - $xfer += $input->readMapBegin($_ktype1271, 
$_vtype1272, $_size1270); - for ($_i1274 = 0; $_i1274 < $_size1270; ++$_i1274) + $_size1279 = 0; + $_ktype1280 = 0; + $_vtype1281 = 0; + $xfer += $input->readMapBegin($_ktype1280, $_vtype1281, $_size1279); + for ($_i1283 = 0; $_i1283 < $_size1279; ++$_i1283) { - $key1275 = ''; - $val1276 = ''; - $xfer += $input->readString($key1275); - $xfer += $input->readString($val1276); - $this->part_vals[$key1275] = $val1276; + $key1284 = ''; + $val1285 = ''; + $xfer += $input->readString($key1284); + $xfer += $input->readString($val1285); + $this->part_vals[$key1284] = $val1285; } $xfer += $input->readMapEnd(); } else { @@ -37705,10 +37921,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1277 => $viter1278) + foreach ($this->part_vals as $kiter1286 => $viter1287) { - $xfer += $output->writeString($kiter1277); - $xfer += $output->writeString($viter1278); + $xfer += $output->writeString($kiter1286); + $xfer += $output->writeString($viter1287); } } $output->writeMapEnd(); @@ -40242,6 +40458,786 @@ class ThriftHiveMetastore_update_partition_column_statistics_req_result { } +class ThriftHiveMetastore_update_table_basic_statistics_req_args { + static $_TSPEC; + + /** + * @var \metastore\SetBasicStatsRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\SetBasicStatsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_table_basic_statistics_req_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, 
$ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\SetBasicStatsRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_table_basic_statistics_req_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_update_table_basic_statistics_req_result { + static $_TSPEC; + + /** + * @var \metastore\SetBasicStatsResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + /** + * @var \metastore\InvalidInputException + */ + public $o4 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\SetBasicStatsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 4 => array( + 
'var' => 'o4', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidInputException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_table_basic_statistics_req_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\SetBasicStatsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\InvalidInputException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_update_table_basic_statistics_req_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_update_partition_basic_statistics_req_args { + static $_TSPEC; + + /** + * @var \metastore\SetBasicStatsRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\SetBasicStatsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_partition_basic_statistics_req_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if 
($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\SetBasicStatsRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_partition_basic_statistics_req_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_update_partition_basic_statistics_req_result { + static $_TSPEC; + + /** + * @var \metastore\SetBasicStatsResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + /** + * @var \metastore\InvalidInputException + */ + public $o4 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\SetBasicStatsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 4 => array( + 'var' => 'o4', 
+ 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidInputException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_partition_basic_statistics_req_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\SetBasicStatsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\InvalidInputException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_update_partition_basic_statistics_req_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_invalidate_all_column_statistics_req_args { + static $_TSPEC; + + /** + * @var \metastore\InvalidateColumnStatsRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidateColumnStatsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_invalidate_all_column_statistics_req_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, 
$fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\InvalidateColumnStatsRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_invalidate_all_column_statistics_req_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_invalidate_all_column_statistics_req_result { + static $_TSPEC; + + /** + * @var \metastore\InvalidateColumnStatsResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + /** + * @var \metastore\InvalidInputException + */ + public $o4 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidateColumnStatsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + 
), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidInputException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_invalidate_all_column_statistics_req_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\InvalidateColumnStatsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\InvalidInputException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function 
write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_invalidate_all_column_statistics_req_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_table_column_statistics_args { static $_TSPEC; @@ -43187,14 +44183,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1279 = 0; - $_etype1282 = 0; - $xfer += $input->readListBegin($_etype1282, $_size1279); - for ($_i1283 = 0; $_i1283 < $_size1279; ++$_i1283) + $_size1288 = 0; + $_etype1291 = 0; + $xfer += $input->readListBegin($_etype1291, $_size1288); + for ($_i1292 = 0; $_i1292 < $_size1288; ++$_i1292) { - $elem1284 = null; - $xfer += $input->readString($elem1284); - $this->success []= $elem1284; + $elem1293 = null; + $xfer += $input->readString($elem1293); + $this->success []= $elem1293; } $xfer += $input->readListEnd(); } else { @@ -43230,9 +44226,9 @@ class 
ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1285) + foreach ($this->success as $iter1294) { - $xfer += $output->writeString($iter1285); + $xfer += $output->writeString($iter1294); } } $output->writeListEnd(); @@ -44101,14 +45097,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1286 = 0; - $_etype1289 = 0; - $xfer += $input->readListBegin($_etype1289, $_size1286); - for ($_i1290 = 0; $_i1290 < $_size1286; ++$_i1290) + $_size1295 = 0; + $_etype1298 = 0; + $xfer += $input->readListBegin($_etype1298, $_size1295); + for ($_i1299 = 0; $_i1299 < $_size1295; ++$_i1299) { - $elem1291 = null; - $xfer += $input->readString($elem1291); - $this->success []= $elem1291; + $elem1300 = null; + $xfer += $input->readString($elem1300); + $this->success []= $elem1300; } $xfer += $input->readListEnd(); } else { @@ -44144,9 +45140,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1292) + foreach ($this->success as $iter1301) { - $xfer += $output->writeString($iter1292); + $xfer += $output->writeString($iter1301); } } $output->writeListEnd(); @@ -44837,15 +45833,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1293 = 0; - $_etype1296 = 0; - $xfer += $input->readListBegin($_etype1296, $_size1293); - for ($_i1297 = 0; $_i1297 < $_size1293; ++$_i1297) + $_size1302 = 0; + $_etype1305 = 0; + $xfer += $input->readListBegin($_etype1305, $_size1302); + for ($_i1306 = 0; $_i1306 < $_size1302; ++$_i1306) { - $elem1298 = null; - $elem1298 = new \metastore\Role(); - $xfer += $elem1298->read($input); - $this->success []= $elem1298; + $elem1307 = null; + $elem1307 = new \metastore\Role(); + $xfer += $elem1307->read($input); + $this->success []= 
$elem1307; } $xfer += $input->readListEnd(); } else { @@ -44881,9 +45877,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1299) + foreach ($this->success as $iter1308) { - $xfer += $iter1299->write($output); + $xfer += $iter1308->write($output); } } $output->writeListEnd(); @@ -45545,14 +46541,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1300 = 0; - $_etype1303 = 0; - $xfer += $input->readListBegin($_etype1303, $_size1300); - for ($_i1304 = 0; $_i1304 < $_size1300; ++$_i1304) + $_size1309 = 0; + $_etype1312 = 0; + $xfer += $input->readListBegin($_etype1312, $_size1309); + for ($_i1313 = 0; $_i1313 < $_size1309; ++$_i1313) { - $elem1305 = null; - $xfer += $input->readString($elem1305); - $this->group_names []= $elem1305; + $elem1314 = null; + $xfer += $input->readString($elem1314); + $this->group_names []= $elem1314; } $xfer += $input->readListEnd(); } else { @@ -45593,9 +46589,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1306) + foreach ($this->group_names as $iter1315) { - $xfer += $output->writeString($iter1306); + $xfer += $output->writeString($iter1315); } } $output->writeListEnd(); @@ -45903,15 +46899,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1307 = 0; - $_etype1310 = 0; - $xfer += $input->readListBegin($_etype1310, $_size1307); - for ($_i1311 = 0; $_i1311 < $_size1307; ++$_i1311) + $_size1316 = 0; + $_etype1319 = 0; + $xfer += $input->readListBegin($_etype1319, $_size1316); + for ($_i1320 = 0; $_i1320 < $_size1316; ++$_i1320) { - $elem1312 = null; - $elem1312 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1312->read($input); - $this->success []= $elem1312; + 
$elem1321 = null; + $elem1321 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1321->read($input); + $this->success []= $elem1321; } $xfer += $input->readListEnd(); } else { @@ -45947,9 +46943,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1313) + foreach ($this->success as $iter1322) { - $xfer += $iter1313->write($output); + $xfer += $iter1322->write($output); } } $output->writeListEnd(); @@ -46817,14 +47813,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1314 = 0; - $_etype1317 = 0; - $xfer += $input->readListBegin($_etype1317, $_size1314); - for ($_i1318 = 0; $_i1318 < $_size1314; ++$_i1318) + $_size1323 = 0; + $_etype1326 = 0; + $xfer += $input->readListBegin($_etype1326, $_size1323); + for ($_i1327 = 0; $_i1327 < $_size1323; ++$_i1327) { - $elem1319 = null; - $xfer += $input->readString($elem1319); - $this->group_names []= $elem1319; + $elem1328 = null; + $xfer += $input->readString($elem1328); + $this->group_names []= $elem1328; } $xfer += $input->readListEnd(); } else { @@ -46857,9 +47853,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1320) + foreach ($this->group_names as $iter1329) { - $xfer += $output->writeString($iter1320); + $xfer += $output->writeString($iter1329); } } $output->writeListEnd(); @@ -46935,14 +47931,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1321 = 0; - $_etype1324 = 0; - $xfer += $input->readListBegin($_etype1324, $_size1321); - for ($_i1325 = 0; $_i1325 < $_size1321; ++$_i1325) + $_size1330 = 0; + $_etype1333 = 0; + $xfer += $input->readListBegin($_etype1333, $_size1330); + for ($_i1334 = 0; $_i1334 < $_size1330; ++$_i1334) { - $elem1326 = null; - $xfer += 
$input->readString($elem1326); - $this->success []= $elem1326; + $elem1335 = null; + $xfer += $input->readString($elem1335); + $this->success []= $elem1335; } $xfer += $input->readListEnd(); } else { @@ -46978,9 +47974,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1327) + foreach ($this->success as $iter1336) { - $xfer += $output->writeString($iter1327); + $xfer += $output->writeString($iter1336); } } $output->writeListEnd(); @@ -48097,14 +49093,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1328 = 0; - $_etype1331 = 0; - $xfer += $input->readListBegin($_etype1331, $_size1328); - for ($_i1332 = 0; $_i1332 < $_size1328; ++$_i1332) + $_size1337 = 0; + $_etype1340 = 0; + $xfer += $input->readListBegin($_etype1340, $_size1337); + for ($_i1341 = 0; $_i1341 < $_size1337; ++$_i1341) { - $elem1333 = null; - $xfer += $input->readString($elem1333); - $this->success []= $elem1333; + $elem1342 = null; + $xfer += $input->readString($elem1342); + $this->success []= $elem1342; } $xfer += $input->readListEnd(); } else { @@ -48132,9 +49128,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1334) + foreach ($this->success as $iter1343) { - $xfer += $output->writeString($iter1334); + $xfer += $output->writeString($iter1343); } } $output->writeListEnd(); @@ -48773,14 +49769,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1335 = 0; - $_etype1338 = 0; - $xfer += $input->readListBegin($_etype1338, $_size1335); - for ($_i1339 = 0; $_i1339 < $_size1335; ++$_i1339) + $_size1344 = 0; + $_etype1347 = 0; + $xfer += $input->readListBegin($_etype1347, $_size1344); + for ($_i1348 = 0; $_i1348 < $_size1344; 
++$_i1348) { - $elem1340 = null; - $xfer += $input->readString($elem1340); - $this->success []= $elem1340; + $elem1349 = null; + $xfer += $input->readString($elem1349); + $this->success []= $elem1349; } $xfer += $input->readListEnd(); } else { @@ -48808,9 +49804,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1341) + foreach ($this->success as $iter1350) { - $xfer += $output->writeString($iter1341); + $xfer += $output->writeString($iter1350); } } $output->writeListEnd(); @@ -59639,15 +60635,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1342 = 0; - $_etype1345 = 0; - $xfer += $input->readListBegin($_etype1345, $_size1342); - for ($_i1346 = 0; $_i1346 < $_size1342; ++$_i1346) + $_size1351 = 0; + $_etype1354 = 0; + $xfer += $input->readListBegin($_etype1354, $_size1351); + for ($_i1355 = 0; $_i1355 < $_size1351; ++$_i1355) { - $elem1347 = null; - $elem1347 = new \metastore\SchemaVersion(); - $xfer += $elem1347->read($input); - $this->success []= $elem1347; + $elem1356 = null; + $elem1356 = new \metastore\SchemaVersion(); + $xfer += $elem1356->read($input); + $this->success []= $elem1356; } $xfer += $input->readListEnd(); } else { @@ -59691,9 +60687,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1348) + foreach ($this->success as $iter1357) { - $xfer += $iter1348->write($output); + $xfer += $iter1357->write($output); } } $output->writeListEnd(); @@ -61562,15 +62558,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1349 = 0; - $_etype1352 = 0; - $xfer += $input->readListBegin($_etype1352, $_size1349); - for ($_i1353 = 0; $_i1353 < $_size1349; ++$_i1353) + $_size1358 = 0; + 
$_etype1361 = 0; + $xfer += $input->readListBegin($_etype1361, $_size1358); + for ($_i1362 = 0; $_i1362 < $_size1358; ++$_i1362) { - $elem1354 = null; - $elem1354 = new \metastore\RuntimeStat(); - $xfer += $elem1354->read($input); - $this->success []= $elem1354; + $elem1363 = null; + $elem1363 = new \metastore\RuntimeStat(); + $xfer += $elem1363->read($input); + $this->success []= $elem1363; } $xfer += $input->readListEnd(); } else { @@ -61606,9 +62602,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1355) + foreach ($this->success as $iter1364) { - $xfer += $iter1355->write($output); + $xfer += $iter1364->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index 5ed4f71b1d..bcc55dd6c8 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -9963,6 +9963,14 @@ class ColumnStatisticsObj { * @var \metastore\ColumnStatisticsData */ public $statsData = null; + /** + * @var bool + */ + public $isStatsCompliant = null; + /** + * @var int + */ + public $writeId = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9980,6 +9988,14 @@ class ColumnStatisticsObj { 'type' => TType::STRUCT, 'class' => '\metastore\ColumnStatisticsData', ), + 4 => array( + 'var' => 'isStatsCompliant', + 'type' => TType::BOOL, + ), + 5 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), ); } if (is_array($vals)) { @@ -9992,6 +10008,12 @@ class ColumnStatisticsObj { if (isset($vals['statsData'])) { $this->statsData = $vals['statsData']; } + if (isset($vals['isStatsCompliant'])) { + $this->isStatsCompliant = $vals['isStatsCompliant']; + } + if (isset($vals['writeId'])) 
{ + $this->writeId = $vals['writeId']; + } } } @@ -10036,6 +10058,20 @@ class ColumnStatisticsObj { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isStatsCompliant); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10067,6 +10103,16 @@ class ColumnStatisticsObj { $xfer += $this->statsData->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->isStatsCompliant !== null) { + $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 4); + $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 5); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10275,10 +10321,6 @@ class ColumnStatistics { * @var \metastore\ColumnStatisticsObj[] */ public $statsObj = null; - /** - * @var bool - */ - public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10297,10 +10339,6 @@ class ColumnStatistics { 'class' => '\metastore\ColumnStatisticsObj', ), ), - 3 => array( - 'var' => 'isStatsCompliant', - 'type' => TType::BOOL, - ), ); } if (is_array($vals)) { @@ -10310,9 +10348,6 @@ class ColumnStatistics { if (isset($vals['statsObj'])) { $this->statsObj = $vals['statsObj']; } - if (isset($vals['isStatsCompliant'])) { - $this->isStatsCompliant = $vals['isStatsCompliant']; - } } } @@ -10361,13 +10396,6 @@ class ColumnStatistics { $xfer += $input->skip($ftype); } break; - case 3: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->isStatsCompliant); - } else { - $xfer += 
$input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -10406,11 +10434,6 @@ class ColumnStatistics { } $xfer += $output->writeFieldEnd(); } - if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 3); - $xfer += $output->writeBool($this->isStatsCompliant); - $xfer += $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -10429,10 +10452,6 @@ class AggrStats { * @var int */ public $partsFound = null; - /** - * @var bool - */ - public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10450,10 +10469,6 @@ class AggrStats { 'var' => 'partsFound', 'type' => TType::I64, ), - 3 => array( - 'var' => 'isStatsCompliant', - 'type' => TType::BOOL, - ), ); } if (is_array($vals)) { @@ -10463,9 +10478,6 @@ class AggrStats { if (isset($vals['partsFound'])) { $this->partsFound = $vals['partsFound']; } - if (isset($vals['isStatsCompliant'])) { - $this->isStatsCompliant = $vals['isStatsCompliant']; - } } } @@ -10513,9 +10525,547 @@ class AggrStats { $xfer += $input->skip($ftype); } break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AggrStats'); + if ($this->colStats !== null) { + if (!is_array($this->colStats)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('colStats', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->colStats)); + { + foreach ($this->colStats as $iter257) + { + $xfer += $iter257->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->partsFound !== null) { + $xfer += $output->writeFieldBegin('partsFound', 
TType::I64, 2); + $xfer += $output->writeI64($this->partsFound); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SetPartitionsStatsRequest { + static $_TSPEC; + + /** + * @var \metastore\ColumnStatistics[] + */ + public $colStats = null; + /** + * @var bool + */ + public $needMerge = null; + /** + * @var int + */ + public $writeId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'colStats', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\ColumnStatistics', + ), + ), + 2 => array( + 'var' => 'needMerge', + 'type' => TType::BOOL, + ), + 3 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['colStats'])) { + $this->colStats = $vals['colStats']; + } + if (isset($vals['needMerge'])) { + $this->needMerge = $vals['needMerge']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + } + } + + public function getName() { + return 'SetPartitionsStatsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->colStats = array(); + $_size258 = 0; + $_etype261 = 0; + $xfer += $input->readListBegin($_etype261, $_size258); + for ($_i262 = 0; $_i262 < $_size258; ++$_i262) + { + $elem263 = null; + $elem263 = new 
\metastore\ColumnStatistics(); + $xfer += $elem263->read($input); + $this->colStats []= $elem263; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->needMerge); + } else { + $xfer += $input->skip($ftype); + } + break; case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SetPartitionsStatsRequest'); + if ($this->colStats !== null) { + if (!is_array($this->colStats)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('colStats', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->colStats)); + { + foreach ($this->colStats as $iter264) + { + $xfer += $iter264->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->needMerge !== null) { + $xfer += $output->writeFieldBegin('needMerge', TType::BOOL, 2); + $xfer += $output->writeBool($this->needMerge); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + 
$xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SetPartitionsStatsResponse { + static $_TSPEC; + + /** + * @var bool + */ + public $result = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'result', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['result'])) { + $this->result = $vals['result']; + } + } + } + + public function getName() { + return 'SetPartitionsStatsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->result); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SetPartitionsStatsResponse'); + if ($this->result !== null) { + $xfer += $output->writeFieldBegin('result', TType::BOOL, 1); + $xfer += $output->writeBool($this->result); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SetBasicStatsRequest { + static $_TSPEC; + + /** + * @var \metastore\ColumnStatisticsDesc + */ + public $desc = null; + /** + * @var bool + */ + public $isValid = null; + /** + * @var array + */ + public $legacyStats = null; + /** + * @var int + */ + public $writeId = -1; + /** + * @var string + */ + public $validWriteIdList = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'desc', + 'type' => 
TType::STRUCT, + 'class' => '\metastore\ColumnStatisticsDesc', + ), + 2 => array( + 'var' => 'isValid', + 'type' => TType::BOOL, + ), + 3 => array( + 'var' => 'legacyStats', + 'type' => TType::MAP, + 'ktype' => TType::STRING, + 'vtype' => TType::STRING, + 'key' => array( + 'type' => TType::STRING, + ), + 'val' => array( + 'type' => TType::STRING, + ), + ), + 4 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['desc'])) { + $this->desc = $vals['desc']; + } + if (isset($vals['isValid'])) { + $this->isValid = $vals['isValid']; + } + if (isset($vals['legacyStats'])) { + $this->legacyStats = $vals['legacyStats']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } + } + } + + public function getName() { + return 'SetBasicStatsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->desc = new \metastore\ColumnStatisticsDesc(); + $xfer += $this->desc->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isValid); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::MAP) { + $this->legacyStats = array(); + $_size265 = 0; + $_ktype266 = 0; + $_vtype267 = 0; + $xfer += $input->readMapBegin($_ktype266, $_vtype267, $_size265); + for ($_i269 = 0; $_i269 < $_size265; ++$_i269) + { + $key270 = ''; + $val271 = ''; + $xfer += $input->readString($key270); + $xfer += $input->readString($val271); + 
$this->legacyStats[$key270] = $val271; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SetBasicStatsRequest'); + if ($this->desc !== null) { + if (!is_object($this->desc)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('desc', TType::STRUCT, 1); + $xfer += $this->desc->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->isValid !== null) { + $xfer += $output->writeFieldBegin('isValid', TType::BOOL, 2); + $xfer += $output->writeBool($this->isValid); + $xfer += $output->writeFieldEnd(); + } + if ($this->legacyStats !== null) { + if (!is_array($this->legacyStats)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('legacyStats', TType::MAP, 3); + { + $output->writeMapBegin(TType::STRING, TType::STRING, count($this->legacyStats)); + { + foreach ($this->legacyStats as $kiter272 => $viter273) + { + $xfer += $output->writeString($kiter272); + $xfer += $output->writeString($viter273); + } + } + $output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 4); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + if ($this->validWriteIdList !== null) { + $xfer += 
$output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SetBasicStatsResponse { + static $_TSPEC; + + /** + * @var bool + */ + public $result = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'result', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['result'])) { + $this->result = $vals['result']; + } + } + } + + public function getName() { + return 'SetBasicStatsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->isStatsCompliant); + $xfer += $input->readBool($this->result); } else { $xfer += $input->skip($ftype); } @@ -10532,32 +11082,10 @@ class AggrStats { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('AggrStats'); - if ($this->colStats !== null) { - if (!is_array($this->colStats)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('colStats', TType::LST, 1); - { - $output->writeListBegin(TType::STRUCT, count($this->colStats)); - { - foreach ($this->colStats as $iter257) - { - $xfer += $iter257->write($output); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - if ($this->partsFound !== null) { - $xfer += $output->writeFieldBegin('partsFound', TType::I64, 2); - $xfer += $output->writeI64($this->partsFound); - $xfer += $output->writeFieldEnd(); - } - if ($this->isStatsCompliant 
!== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 3); - $xfer += $output->writeBool($this->isStatsCompliant); + $xfer += $output->writeStructBegin('SetBasicStatsResponse'); + if ($this->result !== null) { + $xfer += $output->writeFieldBegin('result', TType::BOOL, 1); + $xfer += $output->writeBool($this->result); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -10567,70 +11095,76 @@ class AggrStats { } -class SetPartitionsStatsRequest { +class InvalidateColumnStatsRequest { static $_TSPEC; /** - * @var \metastore\ColumnStatistics[] + * @var string */ - public $colStats = null; + public $catName = null; /** - * @var bool + * @var string */ - public $needMerge = null; + public $dbName = null; /** - * @var int + * @var string */ - public $writeId = -1; + public $tableName = null; /** * @var string */ - public $validWriteIdList = null; + public $partName = null; + /** + * @var int + */ + public $writeId = -1; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'colStats', - 'type' => TType::LST, - 'etype' => TType::STRUCT, - 'elem' => array( - 'type' => TType::STRUCT, - 'class' => '\metastore\ColumnStatistics', - ), + 'var' => 'catName', + 'type' => TType::STRING, ), 2 => array( - 'var' => 'needMerge', - 'type' => TType::BOOL, + 'var' => 'dbName', + 'type' => TType::STRING, ), 3 => array( - 'var' => 'writeId', - 'type' => TType::I64, + 'var' => 'tableName', + 'type' => TType::STRING, ), 4 => array( - 'var' => 'validWriteIdList', + 'var' => 'partName', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), ); } if (is_array($vals)) { - if (isset($vals['colStats'])) { - $this->colStats = $vals['colStats']; + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; } - if (isset($vals['needMerge'])) { - $this->needMerge = $vals['needMerge']; + if (isset($vals['dbName'])) { + $this->dbName = 
$vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['partName'])) { + $this->partName = $vals['partName']; } if (isset($vals['writeId'])) { $this->writeId = $vals['writeId']; } - if (isset($vals['validWriteIdList'])) { - $this->validWriteIdList = $vals['validWriteIdList']; - } } } public function getName() { - return 'SetPartitionsStatsRequest'; + return 'InvalidateColumnStatsRequest'; } public function read($input) @@ -10649,40 +11183,36 @@ class SetPartitionsStatsRequest { switch ($fid) { case 1: - if ($ftype == TType::LST) { - $this->colStats = array(); - $_size258 = 0; - $_etype261 = 0; - $xfer += $input->readListBegin($_etype261, $_size258); - for ($_i262 = 0; $_i262 < $_size258; ++$_i262) - { - $elem263 = null; - $elem263 = new \metastore\ColumnStatistics(); - $xfer += $elem263->read($input); - $this->colStats []= $elem263; - } - $xfer += $input->readListEnd(); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->needMerge); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); } else { $xfer += $input->skip($ftype); } break; case 3: - if ($ftype == TType::I64) { - $xfer += $input->readI64($this->writeId); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->validWriteIdList); + $xfer += $input->readString($this->partName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); } else { $xfer += $input->skip($ftype); } @@ -10699,37 +11229,30 @@ class SetPartitionsStatsRequest { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('SetPartitionsStatsRequest'); 
- if ($this->colStats !== null) { - if (!is_array($this->colStats)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('colStats', TType::LST, 1); - { - $output->writeListBegin(TType::STRUCT, count($this->colStats)); - { - foreach ($this->colStats as $iter264) - { - $xfer += $iter264->write($output); - } - } - $output->writeListEnd(); - } + $xfer += $output->writeStructBegin('InvalidateColumnStatsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } - if ($this->needMerge !== null) { - $xfer += $output->writeFieldBegin('needMerge', TType::BOOL, 2); - $xfer += $output->writeBool($this->needMerge); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); + $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } - if ($this->writeId !== null) { - $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); - $xfer += $output->writeI64($this->writeId); + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3); + $xfer += $output->writeString($this->tableName); $xfer += $output->writeFieldEnd(); } - if ($this->validWriteIdList !== null) { - $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); - $xfer += $output->writeString($this->validWriteIdList); + if ($this->partName !== null) { + $xfer += $output->writeFieldBegin('partName', TType::STRING, 4); + $xfer += $output->writeString($this->partName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 5); + $xfer += $output->writeI64($this->writeId); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -10739,7 +11262,7 @@ class SetPartitionsStatsRequest { } -class 
SetPartitionsStatsResponse { +class InvalidateColumnStatsResponse { static $_TSPEC; /** @@ -10764,7 +11287,7 @@ class SetPartitionsStatsResponse { } public function getName() { - return 'SetPartitionsStatsResponse'; + return 'InvalidateColumnStatsResponse'; } public function read($input) @@ -10801,7 +11324,7 @@ class SetPartitionsStatsResponse { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('SetPartitionsStatsResponse'); + $xfer += $output->writeStructBegin('InvalidateColumnStatsResponse'); if ($this->result !== null) { $xfer += $output->writeFieldBegin('result', TType::BOOL, 1); $xfer += $output->writeBool($this->result); @@ -10884,15 +11407,15 @@ class Schema { case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size265 = 0; - $_etype268 = 0; - $xfer += $input->readListBegin($_etype268, $_size265); - for ($_i269 = 0; $_i269 < $_size265; ++$_i269) + $_size274 = 0; + $_etype277 = 0; + $xfer += $input->readListBegin($_etype277, $_size274); + for ($_i278 = 0; $_i278 < $_size274; ++$_i278) { - $elem270 = null; - $elem270 = new \metastore\FieldSchema(); - $xfer += $elem270->read($input); - $this->fieldSchemas []= $elem270; + $elem279 = null; + $elem279 = new \metastore\FieldSchema(); + $xfer += $elem279->read($input); + $this->fieldSchemas []= $elem279; } $xfer += $input->readListEnd(); } else { @@ -10902,17 +11425,17 @@ class Schema { case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size271 = 0; - $_ktype272 = 0; - $_vtype273 = 0; - $xfer += $input->readMapBegin($_ktype272, $_vtype273, $_size271); - for ($_i275 = 0; $_i275 < $_size271; ++$_i275) + $_size280 = 0; + $_ktype281 = 0; + $_vtype282 = 0; + $xfer += $input->readMapBegin($_ktype281, $_vtype282, $_size280); + for ($_i284 = 0; $_i284 < $_size280; ++$_i284) { - $key276 = ''; - $val277 = ''; - $xfer += $input->readString($key276); - $xfer += $input->readString($val277); - $this->properties[$key276] = $val277; + $key285 = ''; + $val286 = ''; 
+ $xfer += $input->readString($key285); + $xfer += $input->readString($val286); + $this->properties[$key285] = $val286; } $xfer += $input->readMapEnd(); } else { @@ -10940,9 +11463,9 @@ class Schema { { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter278) + foreach ($this->fieldSchemas as $iter287) { - $xfer += $iter278->write($output); + $xfer += $iter287->write($output); } } $output->writeListEnd(); @@ -10957,10 +11480,10 @@ class Schema { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter279 => $viter280) + foreach ($this->properties as $kiter288 => $viter289) { - $xfer += $output->writeString($kiter279); - $xfer += $output->writeString($viter280); + $xfer += $output->writeString($kiter288); + $xfer += $output->writeString($viter289); } } $output->writeMapEnd(); @@ -11028,17 +11551,17 @@ class EnvironmentContext { case 1: if ($ftype == TType::MAP) { $this->properties = array(); - $_size281 = 0; - $_ktype282 = 0; - $_vtype283 = 0; - $xfer += $input->readMapBegin($_ktype282, $_vtype283, $_size281); - for ($_i285 = 0; $_i285 < $_size281; ++$_i285) + $_size290 = 0; + $_ktype291 = 0; + $_vtype292 = 0; + $xfer += $input->readMapBegin($_ktype291, $_vtype292, $_size290); + for ($_i294 = 0; $_i294 < $_size290; ++$_i294) { - $key286 = ''; - $val287 = ''; - $xfer += $input->readString($key286); - $xfer += $input->readString($val287); - $this->properties[$key286] = $val287; + $key295 = ''; + $val296 = ''; + $xfer += $input->readString($key295); + $xfer += $input->readString($val296); + $this->properties[$key295] = $val296; } $xfer += $input->readMapEnd(); } else { @@ -11066,10 +11589,10 @@ class EnvironmentContext { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter288 => $viter289) + foreach ($this->properties as $kiter297 => $viter298) { - $xfer += 
$output->writeString($kiter288); - $xfer += $output->writeString($viter289); + $xfer += $output->writeString($kiter297); + $xfer += $output->writeString($viter298); } } $output->writeMapEnd(); @@ -11255,15 +11778,15 @@ class PrimaryKeysResponse { case 1: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size290 = 0; - $_etype293 = 0; - $xfer += $input->readListBegin($_etype293, $_size290); - for ($_i294 = 0; $_i294 < $_size290; ++$_i294) + $_size299 = 0; + $_etype302 = 0; + $xfer += $input->readListBegin($_etype302, $_size299); + for ($_i303 = 0; $_i303 < $_size299; ++$_i303) { - $elem295 = null; - $elem295 = new \metastore\SQLPrimaryKey(); - $xfer += $elem295->read($input); - $this->primaryKeys []= $elem295; + $elem304 = null; + $elem304 = new \metastore\SQLPrimaryKey(); + $xfer += $elem304->read($input); + $this->primaryKeys []= $elem304; } $xfer += $input->readListEnd(); } else { @@ -11291,9 +11814,9 @@ class PrimaryKeysResponse { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter296) + foreach ($this->primaryKeys as $iter305) { - $xfer += $iter296->write($output); + $xfer += $iter305->write($output); } } $output->writeListEnd(); @@ -11525,15 +12048,15 @@ class ForeignKeysResponse { case 1: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size297 = 0; - $_etype300 = 0; - $xfer += $input->readListBegin($_etype300, $_size297); - for ($_i301 = 0; $_i301 < $_size297; ++$_i301) + $_size306 = 0; + $_etype309 = 0; + $xfer += $input->readListBegin($_etype309, $_size306); + for ($_i310 = 0; $_i310 < $_size306; ++$_i310) { - $elem302 = null; - $elem302 = new \metastore\SQLForeignKey(); - $xfer += $elem302->read($input); - $this->foreignKeys []= $elem302; + $elem311 = null; + $elem311 = new \metastore\SQLForeignKey(); + $xfer += $elem311->read($input); + $this->foreignKeys []= $elem311; } $xfer += $input->readListEnd(); } else { @@ -11561,9 +12084,9 @@ class ForeignKeysResponse { { 
$output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter303) + foreach ($this->foreignKeys as $iter312) { - $xfer += $iter303->write($output); + $xfer += $iter312->write($output); } } $output->writeListEnd(); @@ -11749,15 +12272,15 @@ class UniqueConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size304 = 0; - $_etype307 = 0; - $xfer += $input->readListBegin($_etype307, $_size304); - for ($_i308 = 0; $_i308 < $_size304; ++$_i308) + $_size313 = 0; + $_etype316 = 0; + $xfer += $input->readListBegin($_etype316, $_size313); + for ($_i317 = 0; $_i317 < $_size313; ++$_i317) { - $elem309 = null; - $elem309 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem309->read($input); - $this->uniqueConstraints []= $elem309; + $elem318 = null; + $elem318 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem318->read($input); + $this->uniqueConstraints []= $elem318; } $xfer += $input->readListEnd(); } else { @@ -11785,9 +12308,9 @@ class UniqueConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter310) + foreach ($this->uniqueConstraints as $iter319) { - $xfer += $iter310->write($output); + $xfer += $iter319->write($output); } } $output->writeListEnd(); @@ -11973,15 +12496,15 @@ class NotNullConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size311 = 0; - $_etype314 = 0; - $xfer += $input->readListBegin($_etype314, $_size311); - for ($_i315 = 0; $_i315 < $_size311; ++$_i315) + $_size320 = 0; + $_etype323 = 0; + $xfer += $input->readListBegin($_etype323, $_size320); + for ($_i324 = 0; $_i324 < $_size320; ++$_i324) { - $elem316 = null; - $elem316 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem316->read($input); - $this->notNullConstraints []= $elem316; + $elem325 = null; + $elem325 = new \metastore\SQLNotNullConstraint(); + $xfer += 
$elem325->read($input); + $this->notNullConstraints []= $elem325; } $xfer += $input->readListEnd(); } else { @@ -12009,9 +12532,9 @@ class NotNullConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter317) + foreach ($this->notNullConstraints as $iter326) { - $xfer += $iter317->write($output); + $xfer += $iter326->write($output); } } $output->writeListEnd(); @@ -12197,15 +12720,15 @@ class DefaultConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size318 = 0; - $_etype321 = 0; - $xfer += $input->readListBegin($_etype321, $_size318); - for ($_i322 = 0; $_i322 < $_size318; ++$_i322) + $_size327 = 0; + $_etype330 = 0; + $xfer += $input->readListBegin($_etype330, $_size327); + for ($_i331 = 0; $_i331 < $_size327; ++$_i331) { - $elem323 = null; - $elem323 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem323->read($input); - $this->defaultConstraints []= $elem323; + $elem332 = null; + $elem332 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem332->read($input); + $this->defaultConstraints []= $elem332; } $xfer += $input->readListEnd(); } else { @@ -12233,9 +12756,9 @@ class DefaultConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter324) + foreach ($this->defaultConstraints as $iter333) { - $xfer += $iter324->write($output); + $xfer += $iter333->write($output); } } $output->writeListEnd(); @@ -12421,15 +12944,15 @@ class CheckConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size325 = 0; - $_etype328 = 0; - $xfer += $input->readListBegin($_etype328, $_size325); - for ($_i329 = 0; $_i329 < $_size325; ++$_i329) + $_size334 = 0; + $_etype337 = 0; + $xfer += $input->readListBegin($_etype337, $_size334); + for ($_i338 = 0; $_i338 < $_size334; ++$_i338) { - $elem330 = null; - $elem330 = new 
\metastore\SQLCheckConstraint(); - $xfer += $elem330->read($input); - $this->checkConstraints []= $elem330; + $elem339 = null; + $elem339 = new \metastore\SQLCheckConstraint(); + $xfer += $elem339->read($input); + $this->checkConstraints []= $elem339; } $xfer += $input->readListEnd(); } else { @@ -12457,9 +12980,9 @@ class CheckConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter331) + foreach ($this->checkConstraints as $iter340) { - $xfer += $iter331->write($output); + $xfer += $iter340->write($output); } } $output->writeListEnd(); @@ -12668,15 +13191,15 @@ class AddPrimaryKeyRequest { case 1: if ($ftype == TType::LST) { $this->primaryKeyCols = array(); - $_size332 = 0; - $_etype335 = 0; - $xfer += $input->readListBegin($_etype335, $_size332); - for ($_i336 = 0; $_i336 < $_size332; ++$_i336) + $_size341 = 0; + $_etype344 = 0; + $xfer += $input->readListBegin($_etype344, $_size341); + for ($_i345 = 0; $_i345 < $_size341; ++$_i345) { - $elem337 = null; - $elem337 = new \metastore\SQLPrimaryKey(); - $xfer += $elem337->read($input); - $this->primaryKeyCols []= $elem337; + $elem346 = null; + $elem346 = new \metastore\SQLPrimaryKey(); + $xfer += $elem346->read($input); + $this->primaryKeyCols []= $elem346; } $xfer += $input->readListEnd(); } else { @@ -12704,9 +13227,9 @@ class AddPrimaryKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeyCols)); { - foreach ($this->primaryKeyCols as $iter338) + foreach ($this->primaryKeyCols as $iter347) { - $xfer += $iter338->write($output); + $xfer += $iter347->write($output); } } $output->writeListEnd(); @@ -12771,15 +13294,15 @@ class AddForeignKeyRequest { case 1: if ($ftype == TType::LST) { $this->foreignKeyCols = array(); - $_size339 = 0; - $_etype342 = 0; - $xfer += $input->readListBegin($_etype342, $_size339); - for ($_i343 = 0; $_i343 < $_size339; ++$_i343) + $_size348 = 0; + $_etype351 = 0; + $xfer += 
$input->readListBegin($_etype351, $_size348); + for ($_i352 = 0; $_i352 < $_size348; ++$_i352) { - $elem344 = null; - $elem344 = new \metastore\SQLForeignKey(); - $xfer += $elem344->read($input); - $this->foreignKeyCols []= $elem344; + $elem353 = null; + $elem353 = new \metastore\SQLForeignKey(); + $xfer += $elem353->read($input); + $this->foreignKeyCols []= $elem353; } $xfer += $input->readListEnd(); } else { @@ -12807,9 +13330,9 @@ class AddForeignKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeyCols)); { - foreach ($this->foreignKeyCols as $iter345) + foreach ($this->foreignKeyCols as $iter354) { - $xfer += $iter345->write($output); + $xfer += $iter354->write($output); } } $output->writeListEnd(); @@ -12874,15 +13397,15 @@ class AddUniqueConstraintRequest { case 1: if ($ftype == TType::LST) { $this->uniqueConstraintCols = array(); - $_size346 = 0; - $_etype349 = 0; - $xfer += $input->readListBegin($_etype349, $_size346); - for ($_i350 = 0; $_i350 < $_size346; ++$_i350) + $_size355 = 0; + $_etype358 = 0; + $xfer += $input->readListBegin($_etype358, $_size355); + for ($_i359 = 0; $_i359 < $_size355; ++$_i359) { - $elem351 = null; - $elem351 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem351->read($input); - $this->uniqueConstraintCols []= $elem351; + $elem360 = null; + $elem360 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem360->read($input); + $this->uniqueConstraintCols []= $elem360; } $xfer += $input->readListEnd(); } else { @@ -12910,9 +13433,9 @@ class AddUniqueConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraintCols)); { - foreach ($this->uniqueConstraintCols as $iter352) + foreach ($this->uniqueConstraintCols as $iter361) { - $xfer += $iter352->write($output); + $xfer += $iter361->write($output); } } $output->writeListEnd(); @@ -12977,15 +13500,15 @@ class AddNotNullConstraintRequest { case 1: if ($ftype == TType::LST) { $this->notNullConstraintCols = array(); - $_size353 = 
0; - $_etype356 = 0; - $xfer += $input->readListBegin($_etype356, $_size353); - for ($_i357 = 0; $_i357 < $_size353; ++$_i357) + $_size362 = 0; + $_etype365 = 0; + $xfer += $input->readListBegin($_etype365, $_size362); + for ($_i366 = 0; $_i366 < $_size362; ++$_i366) { - $elem358 = null; - $elem358 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem358->read($input); - $this->notNullConstraintCols []= $elem358; + $elem367 = null; + $elem367 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem367->read($input); + $this->notNullConstraintCols []= $elem367; } $xfer += $input->readListEnd(); } else { @@ -13013,9 +13536,9 @@ class AddNotNullConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraintCols)); { - foreach ($this->notNullConstraintCols as $iter359) + foreach ($this->notNullConstraintCols as $iter368) { - $xfer += $iter359->write($output); + $xfer += $iter368->write($output); } } $output->writeListEnd(); @@ -13080,15 +13603,15 @@ class AddDefaultConstraintRequest { case 1: if ($ftype == TType::LST) { $this->defaultConstraintCols = array(); - $_size360 = 0; - $_etype363 = 0; - $xfer += $input->readListBegin($_etype363, $_size360); - for ($_i364 = 0; $_i364 < $_size360; ++$_i364) + $_size369 = 0; + $_etype372 = 0; + $xfer += $input->readListBegin($_etype372, $_size369); + for ($_i373 = 0; $_i373 < $_size369; ++$_i373) { - $elem365 = null; - $elem365 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem365->read($input); - $this->defaultConstraintCols []= $elem365; + $elem374 = null; + $elem374 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem374->read($input); + $this->defaultConstraintCols []= $elem374; } $xfer += $input->readListEnd(); } else { @@ -13116,9 +13639,9 @@ class AddDefaultConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraintCols)); { - foreach ($this->defaultConstraintCols as $iter366) + foreach ($this->defaultConstraintCols as $iter375) { - $xfer += 
$iter366->write($output); + $xfer += $iter375->write($output); } } $output->writeListEnd(); @@ -13183,15 +13706,15 @@ class AddCheckConstraintRequest { case 1: if ($ftype == TType::LST) { $this->checkConstraintCols = array(); - $_size367 = 0; - $_etype370 = 0; - $xfer += $input->readListBegin($_etype370, $_size367); - for ($_i371 = 0; $_i371 < $_size367; ++$_i371) + $_size376 = 0; + $_etype379 = 0; + $xfer += $input->readListBegin($_etype379, $_size376); + for ($_i380 = 0; $_i380 < $_size376; ++$_i380) { - $elem372 = null; - $elem372 = new \metastore\SQLCheckConstraint(); - $xfer += $elem372->read($input); - $this->checkConstraintCols []= $elem372; + $elem381 = null; + $elem381 = new \metastore\SQLCheckConstraint(); + $xfer += $elem381->read($input); + $this->checkConstraintCols []= $elem381; } $xfer += $input->readListEnd(); } else { @@ -13219,9 +13742,9 @@ class AddCheckConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraintCols)); { - foreach ($this->checkConstraintCols as $iter373) + foreach ($this->checkConstraintCols as $iter382) { - $xfer += $iter373->write($output); + $xfer += $iter382->write($output); } } $output->writeListEnd(); @@ -13297,15 +13820,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size374 = 0; - $_etype377 = 0; - $xfer += $input->readListBegin($_etype377, $_size374); - for ($_i378 = 0; $_i378 < $_size374; ++$_i378) + $_size383 = 0; + $_etype386 = 0; + $xfer += $input->readListBegin($_etype386, $_size383); + for ($_i387 = 0; $_i387 < $_size383; ++$_i387) { - $elem379 = null; - $elem379 = new \metastore\Partition(); - $xfer += $elem379->read($input); - $this->partitions []= $elem379; + $elem388 = null; + $elem388 = new \metastore\Partition(); + $xfer += $elem388->read($input); + $this->partitions []= $elem388; } $xfer += $input->readListEnd(); } else { @@ -13340,9 +13863,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, 
count($this->partitions)); { - foreach ($this->partitions as $iter380) + foreach ($this->partitions as $iter389) { - $xfer += $iter380->write($output); + $xfer += $iter389->write($output); } } $output->writeListEnd(); @@ -13558,10 +14081,6 @@ class TableStatsResult { * @var \metastore\ColumnStatisticsObj[] */ public $tableStats = null; - /** - * @var bool - */ - public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13575,19 +14094,12 @@ class TableStatsResult { 'class' => '\metastore\ColumnStatisticsObj', ), ), - 2 => array( - 'var' => 'isStatsCompliant', - 'type' => TType::BOOL, - ), ); } if (is_array($vals)) { if (isset($vals['tableStats'])) { $this->tableStats = $vals['tableStats']; } - if (isset($vals['isStatsCompliant'])) { - $this->isStatsCompliant = $vals['isStatsCompliant']; - } } } @@ -13613,28 +14125,21 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size381 = 0; - $_etype384 = 0; - $xfer += $input->readListBegin($_etype384, $_size381); - for ($_i385 = 0; $_i385 < $_size381; ++$_i385) + $_size390 = 0; + $_etype393 = 0; + $xfer += $input->readListBegin($_etype393, $_size390); + for ($_i394 = 0; $_i394 < $_size390; ++$_i394) { - $elem386 = null; - $elem386 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem386->read($input); - $this->tableStats []= $elem386; + $elem395 = null; + $elem395 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem395->read($input); + $this->tableStats []= $elem395; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; - case 2: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->isStatsCompliant); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -13656,20 +14161,15 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter387) + foreach 
($this->tableStats as $iter396) { - $xfer += $iter387->write($output); + $xfer += $iter396->write($output); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } - if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); - $xfer += $output->writeBool($this->isStatsCompliant); - $xfer += $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13684,10 +14184,6 @@ class PartitionsStatsResult { * @var array */ public $partStats = null; - /** - * @var bool - */ - public $isStatsCompliant = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13709,19 +14205,12 @@ class PartitionsStatsResult { ), ), ), - 2 => array( - 'var' => 'isStatsCompliant', - 'type' => TType::BOOL, - ), ); } if (is_array($vals)) { if (isset($vals['partStats'])) { $this->partStats = $vals['partStats']; } - if (isset($vals['isStatsCompliant'])) { - $this->isStatsCompliant = $vals['isStatsCompliant']; - } } } @@ -13747,41 +14236,34 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size388 = 0; - $_ktype389 = 0; - $_vtype390 = 0; - $xfer += $input->readMapBegin($_ktype389, $_vtype390, $_size388); - for ($_i392 = 0; $_i392 < $_size388; ++$_i392) + $_size397 = 0; + $_ktype398 = 0; + $_vtype399 = 0; + $xfer += $input->readMapBegin($_ktype398, $_vtype399, $_size397); + for ($_i401 = 0; $_i401 < $_size397; ++$_i401) { - $key393 = ''; - $val394 = array(); - $xfer += $input->readString($key393); - $val394 = array(); - $_size395 = 0; - $_etype398 = 0; - $xfer += $input->readListBegin($_etype398, $_size395); - for ($_i399 = 0; $_i399 < $_size395; ++$_i399) + $key402 = ''; + $val403 = array(); + $xfer += $input->readString($key402); + $val403 = array(); + $_size404 = 0; + $_etype407 = 0; + $xfer += $input->readListBegin($_etype407, $_size404); + for ($_i408 = 0; $_i408 < $_size404; ++$_i408) { - 
$elem400 = null; - $elem400 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem400->read($input); - $val394 []= $elem400; + $elem409 = null; + $elem409 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem409->read($input); + $val403 []= $elem409; } $xfer += $input->readListEnd(); - $this->partStats[$key393] = $val394; + $this->partStats[$key402] = $val403; } $xfer += $input->readMapEnd(); } else { $xfer += $input->skip($ftype); } break; - case 2: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->isStatsCompliant); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -13803,15 +14285,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter401 => $viter402) + foreach ($this->partStats as $kiter410 => $viter411) { - $xfer += $output->writeString($kiter401); + $xfer += $output->writeString($kiter410); { - $output->writeListBegin(TType::STRUCT, count($viter402)); + $output->writeListBegin(TType::STRUCT, count($viter411)); { - foreach ($viter402 as $iter403) + foreach ($viter411 as $iter412) { - $xfer += $iter403->write($output); + $xfer += $iter412->write($output); } } $output->writeListEnd(); @@ -13822,11 +14304,6 @@ class PartitionsStatsResult { } $xfer += $output->writeFieldEnd(); } - if ($this->isStatsCompliant !== null) { - $xfer += $output->writeFieldBegin('isStatsCompliant', TType::BOOL, 2); - $xfer += $output->writeBool($this->isStatsCompliant); - $xfer += $output->writeFieldEnd(); - } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13942,14 +14419,14 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size404 = 0; - $_etype407 = 0; - $xfer += $input->readListBegin($_etype407, $_size404); - for ($_i408 = 0; $_i408 < $_size404; ++$_i408) + $_size413 = 0; + $_etype416 = 0; + $xfer += 
$input->readListBegin($_etype416, $_size413); + for ($_i417 = 0; $_i417 < $_size413; ++$_i417) { - $elem409 = null; - $xfer += $input->readString($elem409); - $this->colNames []= $elem409; + $elem418 = null; + $xfer += $input->readString($elem418); + $this->colNames []= $elem418; } $xfer += $input->readListEnd(); } else { @@ -14001,9 +14478,9 @@ class TableStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter410) + foreach ($this->colNames as $iter419) { - $xfer += $output->writeString($iter410); + $xfer += $output->writeString($iter419); } } $output->writeListEnd(); @@ -14150,14 +14627,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size411 = 0; - $_etype414 = 0; - $xfer += $input->readListBegin($_etype414, $_size411); - for ($_i415 = 0; $_i415 < $_size411; ++$_i415) + $_size420 = 0; + $_etype423 = 0; + $xfer += $input->readListBegin($_etype423, $_size420); + for ($_i424 = 0; $_i424 < $_size420; ++$_i424) { - $elem416 = null; - $xfer += $input->readString($elem416); - $this->colNames []= $elem416; + $elem425 = null; + $xfer += $input->readString($elem425); + $this->colNames []= $elem425; } $xfer += $input->readListEnd(); } else { @@ -14167,14 +14644,14 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size417 = 0; - $_etype420 = 0; - $xfer += $input->readListBegin($_etype420, $_size417); - for ($_i421 = 0; $_i421 < $_size417; ++$_i421) + $_size426 = 0; + $_etype429 = 0; + $xfer += $input->readListBegin($_etype429, $_size426); + for ($_i430 = 0; $_i430 < $_size426; ++$_i430) { - $elem422 = null; - $xfer += $input->readString($elem422); - $this->partNames []= $elem422; + $elem431 = null; + $xfer += $input->readString($elem431); + $this->partNames []= $elem431; } $xfer += $input->readListEnd(); } else { @@ -14226,9 +14703,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, 
count($this->colNames)); { - foreach ($this->colNames as $iter423) + foreach ($this->colNames as $iter432) { - $xfer += $output->writeString($iter423); + $xfer += $output->writeString($iter432); } } $output->writeListEnd(); @@ -14243,9 +14720,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter424) + foreach ($this->partNames as $iter433) { - $xfer += $output->writeString($iter424); + $xfer += $output->writeString($iter433); } } $output->writeListEnd(); @@ -14331,15 +14808,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size425 = 0; - $_etype428 = 0; - $xfer += $input->readListBegin($_etype428, $_size425); - for ($_i429 = 0; $_i429 < $_size425; ++$_i429) + $_size434 = 0; + $_etype437 = 0; + $xfer += $input->readListBegin($_etype437, $_size434); + for ($_i438 = 0; $_i438 < $_size434; ++$_i438) { - $elem430 = null; - $elem430 = new \metastore\Partition(); - $xfer += $elem430->read($input); - $this->partitions []= $elem430; + $elem439 = null; + $elem439 = new \metastore\Partition(); + $xfer += $elem439->read($input); + $this->partitions []= $elem439; } $xfer += $input->readListEnd(); } else { @@ -14374,9 +14851,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter431) + foreach ($this->partitions as $iter440) { - $xfer += $iter431->write($output); + $xfer += $iter440->write($output); } } $output->writeListEnd(); @@ -14526,15 +15003,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size432 = 0; - $_etype435 = 0; - $xfer += $input->readListBegin($_etype435, $_size432); - for ($_i436 = 0; $_i436 < $_size432; ++$_i436) + $_size441 = 0; + $_etype444 = 0; + $xfer += $input->readListBegin($_etype444, $_size441); + for ($_i445 = 0; $_i445 < $_size441; ++$_i445) { - $elem437 = null; - $elem437 = new 
\metastore\Partition(); - $xfer += $elem437->read($input); - $this->parts []= $elem437; + $elem446 = null; + $elem446 = new \metastore\Partition(); + $xfer += $elem446->read($input); + $this->parts []= $elem446; } $xfer += $input->readListEnd(); } else { @@ -14600,9 +15077,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter438) + foreach ($this->parts as $iter447) { - $xfer += $iter438->write($output); + $xfer += $iter447->write($output); } } $output->writeListEnd(); @@ -14687,15 +15164,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size439 = 0; - $_etype442 = 0; - $xfer += $input->readListBegin($_etype442, $_size439); - for ($_i443 = 0; $_i443 < $_size439; ++$_i443) + $_size448 = 0; + $_etype451 = 0; + $xfer += $input->readListBegin($_etype451, $_size448); + for ($_i452 = 0; $_i452 < $_size448; ++$_i452) { - $elem444 = null; - $elem444 = new \metastore\Partition(); - $xfer += $elem444->read($input); - $this->partitions []= $elem444; + $elem453 = null; + $elem453 = new \metastore\Partition(); + $xfer += $elem453->read($input); + $this->partitions []= $elem453; } $xfer += $input->readListEnd(); } else { @@ -14723,9 +15200,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter445) + foreach ($this->partitions as $iter454) { - $xfer += $iter445->write($output); + $xfer += $iter454->write($output); } } $output->writeListEnd(); @@ -14903,14 +15380,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size446 = 0; - $_etype449 = 0; - $xfer += $input->readListBegin($_etype449, $_size446); - for ($_i450 = 0; $_i450 < $_size446; ++$_i450) + $_size455 = 0; + $_etype458 = 0; + $xfer += $input->readListBegin($_etype458, $_size455); + for ($_i459 = 0; $_i459 < $_size455; ++$_i459) { - $elem451 = null; - $xfer += 
$input->readString($elem451); - $this->names []= $elem451; + $elem460 = null; + $xfer += $input->readString($elem460); + $this->names []= $elem460; } $xfer += $input->readListEnd(); } else { @@ -14920,15 +15397,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size452 = 0; - $_etype455 = 0; - $xfer += $input->readListBegin($_etype455, $_size452); - for ($_i456 = 0; $_i456 < $_size452; ++$_i456) + $_size461 = 0; + $_etype464 = 0; + $xfer += $input->readListBegin($_etype464, $_size461); + for ($_i465 = 0; $_i465 < $_size461; ++$_i465) { - $elem457 = null; - $elem457 = new \metastore\DropPartitionsExpr(); - $xfer += $elem457->read($input); - $this->exprs []= $elem457; + $elem466 = null; + $elem466 = new \metastore\DropPartitionsExpr(); + $xfer += $elem466->read($input); + $this->exprs []= $elem466; } $xfer += $input->readListEnd(); } else { @@ -14956,9 +15433,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter458) + foreach ($this->names as $iter467) { - $xfer += $output->writeString($iter458); + $xfer += $output->writeString($iter467); } } $output->writeListEnd(); @@ -14973,9 +15450,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter459) + foreach ($this->exprs as $iter468) { - $xfer += $iter459->write($output); + $xfer += $iter468->write($output); } } $output->writeListEnd(); @@ -15416,15 +15893,15 @@ class PartitionValuesRequest { case 3: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size460 = 0; - $_etype463 = 0; - $xfer += $input->readListBegin($_etype463, $_size460); - for ($_i464 = 0; $_i464 < $_size460; ++$_i464) + $_size469 = 0; + $_etype472 = 0; + $xfer += $input->readListBegin($_etype472, $_size469); + for ($_i473 = 0; $_i473 < $_size469; ++$_i473) { - $elem465 = null; - $elem465 = new \metastore\FieldSchema(); - $xfer += $elem465->read($input); 
- $this->partitionKeys []= $elem465; + $elem474 = null; + $elem474 = new \metastore\FieldSchema(); + $xfer += $elem474->read($input); + $this->partitionKeys []= $elem474; } $xfer += $input->readListEnd(); } else { @@ -15448,15 +15925,15 @@ class PartitionValuesRequest { case 6: if ($ftype == TType::LST) { $this->partitionOrder = array(); - $_size466 = 0; - $_etype469 = 0; - $xfer += $input->readListBegin($_etype469, $_size466); - for ($_i470 = 0; $_i470 < $_size466; ++$_i470) + $_size475 = 0; + $_etype478 = 0; + $xfer += $input->readListBegin($_etype478, $_size475); + for ($_i479 = 0; $_i479 < $_size475; ++$_i479) { - $elem471 = null; - $elem471 = new \metastore\FieldSchema(); - $xfer += $elem471->read($input); - $this->partitionOrder []= $elem471; + $elem480 = null; + $elem480 = new \metastore\FieldSchema(); + $xfer += $elem480->read($input); + $this->partitionOrder []= $elem480; } $xfer += $input->readListEnd(); } else { @@ -15515,9 +15992,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter472) + foreach ($this->partitionKeys as $iter481) { - $xfer += $iter472->write($output); + $xfer += $iter481->write($output); } } $output->writeListEnd(); @@ -15542,9 +16019,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionOrder)); { - foreach ($this->partitionOrder as $iter473) + foreach ($this->partitionOrder as $iter482) { - $xfer += $iter473->write($output); + $xfer += $iter482->write($output); } } $output->writeListEnd(); @@ -15623,14 +16100,14 @@ class PartitionValuesRow { case 1: if ($ftype == TType::LST) { $this->row = array(); - $_size474 = 0; - $_etype477 = 0; - $xfer += $input->readListBegin($_etype477, $_size474); - for ($_i478 = 0; $_i478 < $_size474; ++$_i478) + $_size483 = 0; + $_etype486 = 0; + $xfer += $input->readListBegin($_etype486, $_size483); + for ($_i487 = 0; $_i487 < $_size483; ++$_i487) { - $elem479 = null; 
- $xfer += $input->readString($elem479); - $this->row []= $elem479; + $elem488 = null; + $xfer += $input->readString($elem488); + $this->row []= $elem488; } $xfer += $input->readListEnd(); } else { @@ -15658,9 +16135,9 @@ class PartitionValuesRow { { $output->writeListBegin(TType::STRING, count($this->row)); { - foreach ($this->row as $iter480) + foreach ($this->row as $iter489) { - $xfer += $output->writeString($iter480); + $xfer += $output->writeString($iter489); } } $output->writeListEnd(); @@ -15725,15 +16202,15 @@ class PartitionValuesResponse { case 1: if ($ftype == TType::LST) { $this->partitionValues = array(); - $_size481 = 0; - $_etype484 = 0; - $xfer += $input->readListBegin($_etype484, $_size481); - for ($_i485 = 0; $_i485 < $_size481; ++$_i485) + $_size490 = 0; + $_etype493 = 0; + $xfer += $input->readListBegin($_etype493, $_size490); + for ($_i494 = 0; $_i494 < $_size490; ++$_i494) { - $elem486 = null; - $elem486 = new \metastore\PartitionValuesRow(); - $xfer += $elem486->read($input); - $this->partitionValues []= $elem486; + $elem495 = null; + $elem495 = new \metastore\PartitionValuesRow(); + $xfer += $elem495->read($input); + $this->partitionValues []= $elem495; } $xfer += $input->readListEnd(); } else { @@ -15761,9 +16238,9 @@ class PartitionValuesResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionValues)); { - foreach ($this->partitionValues as $iter487) + foreach ($this->partitionValues as $iter496) { - $xfer += $iter487->write($output); + $xfer += $iter496->write($output); } } $output->writeListEnd(); @@ -16063,15 +16540,15 @@ class Function { case 8: if ($ftype == TType::LST) { $this->resourceUris = array(); - $_size488 = 0; - $_etype491 = 0; - $xfer += $input->readListBegin($_etype491, $_size488); - for ($_i492 = 0; $_i492 < $_size488; ++$_i492) + $_size497 = 0; + $_etype500 = 0; + $xfer += $input->readListBegin($_etype500, $_size497); + for ($_i501 = 0; $_i501 < $_size497; ++$_i501) { - $elem493 = null; - $elem493 = new 
\metastore\ResourceUri(); - $xfer += $elem493->read($input); - $this->resourceUris []= $elem493; + $elem502 = null; + $elem502 = new \metastore\ResourceUri(); + $xfer += $elem502->read($input); + $this->resourceUris []= $elem502; } $xfer += $input->readListEnd(); } else { @@ -16141,9 +16618,9 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter494) + foreach ($this->resourceUris as $iter503) { - $xfer += $iter494->write($output); + $xfer += $iter503->write($output); } } $output->writeListEnd(); @@ -16490,15 +16967,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size495 = 0; - $_etype498 = 0; - $xfer += $input->readListBegin($_etype498, $_size495); - for ($_i499 = 0; $_i499 < $_size495; ++$_i499) + $_size504 = 0; + $_etype507 = 0; + $xfer += $input->readListBegin($_etype507, $_size504); + for ($_i508 = 0; $_i508 < $_size504; ++$_i508) { - $elem500 = null; - $elem500 = new \metastore\TxnInfo(); - $xfer += $elem500->read($input); - $this->open_txns []= $elem500; + $elem509 = null; + $elem509 = new \metastore\TxnInfo(); + $xfer += $elem509->read($input); + $this->open_txns []= $elem509; } $xfer += $input->readListEnd(); } else { @@ -16531,9 +17008,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter501) + foreach ($this->open_txns as $iter510) { - $xfer += $iter501->write($output); + $xfer += $iter510->write($output); } } $output->writeListEnd(); @@ -16637,14 +17114,14 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size502 = 0; - $_etype505 = 0; - $xfer += $input->readListBegin($_etype505, $_size502); - for ($_i506 = 0; $_i506 < $_size502; ++$_i506) + $_size511 = 0; + $_etype514 = 0; + $xfer += $input->readListBegin($_etype514, $_size511); + for ($_i515 = 0; $_i515 < $_size511; ++$_i515) { - 
$elem507 = null; - $xfer += $input->readI64($elem507); - $this->open_txns []= $elem507; + $elem516 = null; + $xfer += $input->readI64($elem516); + $this->open_txns []= $elem516; } $xfer += $input->readListEnd(); } else { @@ -16691,9 +17168,9 @@ class GetOpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter508) + foreach ($this->open_txns as $iter517) { - $xfer += $output->writeI64($iter508); + $xfer += $output->writeI64($iter517); } } $output->writeListEnd(); @@ -16857,14 +17334,14 @@ class OpenTxnRequest { case 6: if ($ftype == TType::LST) { $this->replSrcTxnIds = array(); - $_size509 = 0; - $_etype512 = 0; - $xfer += $input->readListBegin($_etype512, $_size509); - for ($_i513 = 0; $_i513 < $_size509; ++$_i513) + $_size518 = 0; + $_etype521 = 0; + $xfer += $input->readListBegin($_etype521, $_size518); + for ($_i522 = 0; $_i522 < $_size518; ++$_i522) { - $elem514 = null; - $xfer += $input->readI64($elem514); - $this->replSrcTxnIds []= $elem514; + $elem523 = null; + $xfer += $input->readI64($elem523); + $this->replSrcTxnIds []= $elem523; } $xfer += $input->readListEnd(); } else { @@ -16917,9 +17394,9 @@ class OpenTxnRequest { { $output->writeListBegin(TType::I64, count($this->replSrcTxnIds)); { - foreach ($this->replSrcTxnIds as $iter515) + foreach ($this->replSrcTxnIds as $iter524) { - $xfer += $output->writeI64($iter515); + $xfer += $output->writeI64($iter524); } } $output->writeListEnd(); @@ -16983,14 +17460,14 @@ class OpenTxnsResponse { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size516 = 0; - $_etype519 = 0; - $xfer += $input->readListBegin($_etype519, $_size516); - for ($_i520 = 0; $_i520 < $_size516; ++$_i520) + $_size525 = 0; + $_etype528 = 0; + $xfer += $input->readListBegin($_etype528, $_size525); + for ($_i529 = 0; $_i529 < $_size525; ++$_i529) { - $elem521 = null; - $xfer += $input->readI64($elem521); - $this->txn_ids []= $elem521; + $elem530 = null; + $xfer += 
$input->readI64($elem530); + $this->txn_ids []= $elem530; } $xfer += $input->readListEnd(); } else { @@ -17018,9 +17495,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter522) + foreach ($this->txn_ids as $iter531) { - $xfer += $output->writeI64($iter522); + $xfer += $output->writeI64($iter531); } } $output->writeListEnd(); @@ -17182,14 +17659,14 @@ class AbortTxnsRequest { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size523 = 0; - $_etype526 = 0; - $xfer += $input->readListBegin($_etype526, $_size523); - for ($_i527 = 0; $_i527 < $_size523; ++$_i527) + $_size532 = 0; + $_etype535 = 0; + $xfer += $input->readListBegin($_etype535, $_size532); + for ($_i536 = 0; $_i536 < $_size532; ++$_i536) { - $elem528 = null; - $xfer += $input->readI64($elem528); - $this->txn_ids []= $elem528; + $elem537 = null; + $xfer += $input->readI64($elem537); + $this->txn_ids []= $elem537; } $xfer += $input->readListEnd(); } else { @@ -17217,9 +17694,9 @@ class AbortTxnsRequest { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter529) + foreach ($this->txn_ids as $iter538) { - $xfer += $output->writeI64($iter529); + $xfer += $output->writeI64($iter538); } } $output->writeListEnd(); @@ -17320,15 +17797,15 @@ class CommitTxnRequest { case 3: if ($ftype == TType::LST) { $this->writeEventInfos = array(); - $_size530 = 0; - $_etype533 = 0; - $xfer += $input->readListBegin($_etype533, $_size530); - for ($_i534 = 0; $_i534 < $_size530; ++$_i534) + $_size539 = 0; + $_etype542 = 0; + $xfer += $input->readListBegin($_etype542, $_size539); + for ($_i543 = 0; $_i543 < $_size539; ++$_i543) { - $elem535 = null; - $elem535 = new \metastore\WriteEventInfo(); - $xfer += $elem535->read($input); - $this->writeEventInfos []= $elem535; + $elem544 = null; + $elem544 = new \metastore\WriteEventInfo(); + $xfer += $elem544->read($input); + $this->writeEventInfos []= 
$elem544; } $xfer += $input->readListEnd(); } else { @@ -17366,9 +17843,9 @@ class CommitTxnRequest { { $output->writeListBegin(TType::STRUCT, count($this->writeEventInfos)); { - foreach ($this->writeEventInfos as $iter536) + foreach ($this->writeEventInfos as $iter545) { - $xfer += $iter536->write($output); + $xfer += $iter545->write($output); } } $output->writeListEnd(); @@ -17735,14 +18212,14 @@ class ReplTblWriteIdStateRequest { case 6: if ($ftype == TType::LST) { $this->partNames = array(); - $_size537 = 0; - $_etype540 = 0; - $xfer += $input->readListBegin($_etype540, $_size537); - for ($_i541 = 0; $_i541 < $_size537; ++$_i541) + $_size546 = 0; + $_etype549 = 0; + $xfer += $input->readListBegin($_etype549, $_size546); + for ($_i550 = 0; $_i550 < $_size546; ++$_i550) { - $elem542 = null; - $xfer += $input->readString($elem542); - $this->partNames []= $elem542; + $elem551 = null; + $xfer += $input->readString($elem551); + $this->partNames []= $elem551; } $xfer += $input->readListEnd(); } else { @@ -17795,9 +18272,9 @@ class ReplTblWriteIdStateRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter543) + foreach ($this->partNames as $iter552) { - $xfer += $output->writeString($iter543); + $xfer += $output->writeString($iter552); } } $output->writeListEnd(); @@ -17872,14 +18349,14 @@ class GetValidWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->fullTableNames = array(); - $_size544 = 0; - $_etype547 = 0; - $xfer += $input->readListBegin($_etype547, $_size544); - for ($_i548 = 0; $_i548 < $_size544; ++$_i548) + $_size553 = 0; + $_etype556 = 0; + $xfer += $input->readListBegin($_etype556, $_size553); + for ($_i557 = 0; $_i557 < $_size553; ++$_i557) { - $elem549 = null; - $xfer += $input->readString($elem549); - $this->fullTableNames []= $elem549; + $elem558 = null; + $xfer += $input->readString($elem558); + $this->fullTableNames []= $elem558; } $xfer += $input->readListEnd(); } else { @@ 
-17914,9 +18391,9 @@ class GetValidWriteIdsRequest { { $output->writeListBegin(TType::STRING, count($this->fullTableNames)); { - foreach ($this->fullTableNames as $iter550) + foreach ($this->fullTableNames as $iter559) { - $xfer += $output->writeString($iter550); + $xfer += $output->writeString($iter559); } } $output->writeListEnd(); @@ -18043,14 +18520,14 @@ class TableValidWriteIds { case 3: if ($ftype == TType::LST) { $this->invalidWriteIds = array(); - $_size551 = 0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - for ($_i555 = 0; $_i555 < $_size551; ++$_i555) + $_size560 = 0; + $_etype563 = 0; + $xfer += $input->readListBegin($_etype563, $_size560); + for ($_i564 = 0; $_i564 < $_size560; ++$_i564) { - $elem556 = null; - $xfer += $input->readI64($elem556); - $this->invalidWriteIds []= $elem556; + $elem565 = null; + $xfer += $input->readI64($elem565); + $this->invalidWriteIds []= $elem565; } $xfer += $input->readListEnd(); } else { @@ -18102,9 +18579,9 @@ class TableValidWriteIds { { $output->writeListBegin(TType::I64, count($this->invalidWriteIds)); { - foreach ($this->invalidWriteIds as $iter557) + foreach ($this->invalidWriteIds as $iter566) { - $xfer += $output->writeI64($iter557); + $xfer += $output->writeI64($iter566); } } $output->writeListEnd(); @@ -18179,15 +18656,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readListBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size567 = 0; + $_etype570 = 0; + $xfer += $input->readListBegin($_etype570, $_size567); + for ($_i571 = 0; $_i571 < $_size567; ++$_i571) { - $elem563 = null; - $elem563 = new \metastore\TableValidWriteIds(); - $xfer += $elem563->read($input); - $this->tblValidWriteIds []= $elem563; + $elem572 = null; + $elem572 = new \metastore\TableValidWriteIds(); + $xfer += $elem572->read($input); + $this->tblValidWriteIds []= 
$elem572; } $xfer += $input->readListEnd(); } else { @@ -18215,9 +18692,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter564) + foreach ($this->tblValidWriteIds as $iter573) { - $xfer += $iter564->write($output); + $xfer += $iter573->write($output); } } $output->writeListEnd(); @@ -18344,14 +18821,14 @@ class AllocateTableWriteIdsRequest { case 3: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size565 = 0; - $_etype568 = 0; - $xfer += $input->readListBegin($_etype568, $_size565); - for ($_i569 = 0; $_i569 < $_size565; ++$_i569) + $_size574 = 0; + $_etype577 = 0; + $xfer += $input->readListBegin($_etype577, $_size574); + for ($_i578 = 0; $_i578 < $_size574; ++$_i578) { - $elem570 = null; - $xfer += $input->readI64($elem570); - $this->txnIds []= $elem570; + $elem579 = null; + $xfer += $input->readI64($elem579); + $this->txnIds []= $elem579; } $xfer += $input->readListEnd(); } else { @@ -18368,15 +18845,15 @@ class AllocateTableWriteIdsRequest { case 5: if ($ftype == TType::LST) { $this->srcTxnToWriteIdList = array(); - $_size571 = 0; - $_etype574 = 0; - $xfer += $input->readListBegin($_etype574, $_size571); - for ($_i575 = 0; $_i575 < $_size571; ++$_i575) + $_size580 = 0; + $_etype583 = 0; + $xfer += $input->readListBegin($_etype583, $_size580); + for ($_i584 = 0; $_i584 < $_size580; ++$_i584) { - $elem576 = null; - $elem576 = new \metastore\TxnToWriteId(); - $xfer += $elem576->read($input); - $this->srcTxnToWriteIdList []= $elem576; + $elem585 = null; + $elem585 = new \metastore\TxnToWriteId(); + $xfer += $elem585->read($input); + $this->srcTxnToWriteIdList []= $elem585; } $xfer += $input->readListEnd(); } else { @@ -18414,9 +18891,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter577) + foreach ($this->txnIds as $iter586) { - $xfer += $output->writeI64($iter577); 
+ $xfer += $output->writeI64($iter586); } } $output->writeListEnd(); @@ -18436,9 +18913,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::STRUCT, count($this->srcTxnToWriteIdList)); { - foreach ($this->srcTxnToWriteIdList as $iter578) + foreach ($this->srcTxnToWriteIdList as $iter587) { - $xfer += $iter578->write($output); + $xfer += $iter587->write($output); } } $output->writeListEnd(); @@ -18601,15 +19078,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size579 = 0; - $_etype582 = 0; - $xfer += $input->readListBegin($_etype582, $_size579); - for ($_i583 = 0; $_i583 < $_size579; ++$_i583) + $_size588 = 0; + $_etype591 = 0; + $xfer += $input->readListBegin($_etype591, $_size588); + for ($_i592 = 0; $_i592 < $_size588; ++$_i592) { - $elem584 = null; - $elem584 = new \metastore\TxnToWriteId(); - $xfer += $elem584->read($input); - $this->txnToWriteIds []= $elem584; + $elem593 = null; + $elem593 = new \metastore\TxnToWriteId(); + $xfer += $elem593->read($input); + $this->txnToWriteIds []= $elem593; } $xfer += $input->readListEnd(); } else { @@ -18637,9 +19114,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter585) + foreach ($this->txnToWriteIds as $iter594) { - $xfer += $iter585->write($output); + $xfer += $iter594->write($output); } } $output->writeListEnd(); @@ -18984,15 +19461,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size586 = 0; - $_etype589 = 0; - $xfer += $input->readListBegin($_etype589, $_size586); - for ($_i590 = 0; $_i590 < $_size586; ++$_i590) + $_size595 = 0; + $_etype598 = 0; + $xfer += $input->readListBegin($_etype598, $_size595); + for ($_i599 = 0; $_i599 < $_size595; ++$_i599) { - $elem591 = null; - $elem591 = new \metastore\LockComponent(); - $xfer += $elem591->read($input); - $this->component []= 
$elem591; + $elem600 = null; + $elem600 = new \metastore\LockComponent(); + $xfer += $elem600->read($input); + $this->component []= $elem600; } $xfer += $input->readListEnd(); } else { @@ -19048,9 +19525,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter592) + foreach ($this->component as $iter601) { - $xfer += $iter592->write($output); + $xfer += $iter601->write($output); } } $output->writeListEnd(); @@ -19993,15 +20470,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size593 = 0; - $_etype596 = 0; - $xfer += $input->readListBegin($_etype596, $_size593); - for ($_i597 = 0; $_i597 < $_size593; ++$_i597) + $_size602 = 0; + $_etype605 = 0; + $xfer += $input->readListBegin($_etype605, $_size602); + for ($_i606 = 0; $_i606 < $_size602; ++$_i606) { - $elem598 = null; - $elem598 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem598->read($input); - $this->locks []= $elem598; + $elem607 = null; + $elem607 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem607->read($input); + $this->locks []= $elem607; } $xfer += $input->readListEnd(); } else { @@ -20029,9 +20506,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter599) + foreach ($this->locks as $iter608) { - $xfer += $iter599->write($output); + $xfer += $iter608->write($output); } } $output->writeListEnd(); @@ -20306,17 +20783,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size600 = 0; - $_etype603 = 0; - $xfer += $input->readSetBegin($_etype603, $_size600); - for ($_i604 = 0; $_i604 < $_size600; ++$_i604) + $_size609 = 0; + $_etype612 = 0; + $xfer += $input->readSetBegin($_etype612, $_size609); + for ($_i613 = 0; $_i613 < $_size609; ++$_i613) { - $elem605 = null; - $xfer += $input->readI64($elem605); - if (is_scalar($elem605)) { - 
$this->aborted[$elem605] = true; + $elem614 = null; + $xfer += $input->readI64($elem614); + if (is_scalar($elem614)) { + $this->aborted[$elem614] = true; } else { - $this->aborted []= $elem605; + $this->aborted []= $elem614; } } $xfer += $input->readSetEnd(); @@ -20327,17 +20804,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size606 = 0; - $_etype609 = 0; - $xfer += $input->readSetBegin($_etype609, $_size606); - for ($_i610 = 0; $_i610 < $_size606; ++$_i610) + $_size615 = 0; + $_etype618 = 0; + $xfer += $input->readSetBegin($_etype618, $_size615); + for ($_i619 = 0; $_i619 < $_size615; ++$_i619) { - $elem611 = null; - $xfer += $input->readI64($elem611); - if (is_scalar($elem611)) { - $this->nosuch[$elem611] = true; + $elem620 = null; + $xfer += $input->readI64($elem620); + if (is_scalar($elem620)) { + $this->nosuch[$elem620] = true; } else { - $this->nosuch []= $elem611; + $this->nosuch []= $elem620; } } $xfer += $input->readSetEnd(); @@ -20366,12 +20843,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter612 => $iter613) + foreach ($this->aborted as $iter621 => $iter622) { - if (is_scalar($iter613)) { - $xfer += $output->writeI64($iter612); + if (is_scalar($iter622)) { + $xfer += $output->writeI64($iter621); } else { - $xfer += $output->writeI64($iter613); + $xfer += $output->writeI64($iter622); } } } @@ -20387,12 +20864,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter614 => $iter615) + foreach ($this->nosuch as $iter623 => $iter624) { - if (is_scalar($iter615)) { - $xfer += $output->writeI64($iter614); + if (is_scalar($iter624)) { + $xfer += $output->writeI64($iter623); } else { - $xfer += $output->writeI64($iter615); + $xfer += $output->writeI64($iter624); } } } @@ -20551,17 +21028,17 @@ class CompactionRequest { case 6: if ($ftype == 
TType::MAP) { $this->properties = array(); - $_size616 = 0; - $_ktype617 = 0; - $_vtype618 = 0; - $xfer += $input->readMapBegin($_ktype617, $_vtype618, $_size616); - for ($_i620 = 0; $_i620 < $_size616; ++$_i620) + $_size625 = 0; + $_ktype626 = 0; + $_vtype627 = 0; + $xfer += $input->readMapBegin($_ktype626, $_vtype627, $_size625); + for ($_i629 = 0; $_i629 < $_size625; ++$_i629) { - $key621 = ''; - $val622 = ''; - $xfer += $input->readString($key621); - $xfer += $input->readString($val622); - $this->properties[$key621] = $val622; + $key630 = ''; + $val631 = ''; + $xfer += $input->readString($key630); + $xfer += $input->readString($val631); + $this->properties[$key630] = $val631; } $xfer += $input->readMapEnd(); } else { @@ -20614,10 +21091,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter623 => $viter624) + foreach ($this->properties as $kiter632 => $viter633) { - $xfer += $output->writeString($kiter623); - $xfer += $output->writeString($viter624); + $xfer += $output->writeString($kiter632); + $xfer += $output->writeString($viter633); } } $output->writeMapEnd(); @@ -21204,15 +21681,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size625 = 0; - $_etype628 = 0; - $xfer += $input->readListBegin($_etype628, $_size625); - for ($_i629 = 0; $_i629 < $_size625; ++$_i629) + $_size634 = 0; + $_etype637 = 0; + $xfer += $input->readListBegin($_etype637, $_size634); + for ($_i638 = 0; $_i638 < $_size634; ++$_i638) { - $elem630 = null; - $elem630 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem630->read($input); - $this->compacts []= $elem630; + $elem639 = null; + $elem639 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem639->read($input); + $this->compacts []= $elem639; } $xfer += $input->readListEnd(); } else { @@ -21240,9 +21717,9 @@ class ShowCompactResponse { { 
$output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter631) + foreach ($this->compacts as $iter640) { - $xfer += $iter631->write($output); + $xfer += $iter640->write($output); } } $output->writeListEnd(); @@ -21389,14 +21866,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size632 = 0; - $_etype635 = 0; - $xfer += $input->readListBegin($_etype635, $_size632); - for ($_i636 = 0; $_i636 < $_size632; ++$_i636) + $_size641 = 0; + $_etype644 = 0; + $xfer += $input->readListBegin($_etype644, $_size641); + for ($_i645 = 0; $_i645 < $_size641; ++$_i645) { - $elem637 = null; - $xfer += $input->readString($elem637); - $this->partitionnames []= $elem637; + $elem646 = null; + $xfer += $input->readString($elem646); + $this->partitionnames []= $elem646; } $xfer += $input->readListEnd(); } else { @@ -21451,9 +21928,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter638) + foreach ($this->partitionnames as $iter647) { - $xfer += $output->writeString($iter638); + $xfer += $output->writeString($iter647); } } $output->writeListEnd(); @@ -21788,17 +22265,17 @@ class CreationMetadata { case 4: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size639 = 0; - $_etype642 = 0; - $xfer += $input->readSetBegin($_etype642, $_size639); - for ($_i643 = 0; $_i643 < $_size639; ++$_i643) + $_size648 = 0; + $_etype651 = 0; + $xfer += $input->readSetBegin($_etype651, $_size648); + for ($_i652 = 0; $_i652 < $_size648; ++$_i652) { - $elem644 = null; - $xfer += $input->readString($elem644); - if (is_scalar($elem644)) { - $this->tablesUsed[$elem644] = true; + $elem653 = null; + $xfer += $input->readString($elem653); + if (is_scalar($elem653)) { + $this->tablesUsed[$elem653] = true; } else { - $this->tablesUsed []= $elem644; + $this->tablesUsed []= $elem653; } } $xfer += $input->readSetEnd(); 
@@ -21856,12 +22333,12 @@ class CreationMetadata { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter645 => $iter646) + foreach ($this->tablesUsed as $iter654 => $iter655) { - if (is_scalar($iter646)) { - $xfer += $output->writeString($iter645); + if (is_scalar($iter655)) { + $xfer += $output->writeString($iter654); } else { - $xfer += $output->writeString($iter646); + $xfer += $output->writeString($iter655); } } } @@ -22271,15 +22748,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size647 = 0; - $_etype650 = 0; - $xfer += $input->readListBegin($_etype650, $_size647); - for ($_i651 = 0; $_i651 < $_size647; ++$_i651) + $_size656 = 0; + $_etype659 = 0; + $xfer += $input->readListBegin($_etype659, $_size656); + for ($_i660 = 0; $_i660 < $_size656; ++$_i660) { - $elem652 = null; - $elem652 = new \metastore\NotificationEvent(); - $xfer += $elem652->read($input); - $this->events []= $elem652; + $elem661 = null; + $elem661 = new \metastore\NotificationEvent(); + $xfer += $elem661->read($input); + $this->events []= $elem661; } $xfer += $input->readListEnd(); } else { @@ -22307,9 +22784,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter653) + foreach ($this->events as $iter662) { - $xfer += $iter653->write($output); + $xfer += $iter662->write($output); } } $output->writeListEnd(); @@ -22692,14 +23169,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size654 = 0; - $_etype657 = 0; - $xfer += $input->readListBegin($_etype657, $_size654); - for ($_i658 = 0; $_i658 < $_size654; ++$_i658) + $_size663 = 0; + $_etype666 = 0; + $xfer += $input->readListBegin($_etype666, $_size663); + for ($_i667 = 0; $_i667 < $_size663; ++$_i667) { - $elem659 = null; - $xfer += $input->readString($elem659); - $this->filesAdded []= $elem659; + 
$elem668 = null; + $xfer += $input->readString($elem668); + $this->filesAdded []= $elem668; } $xfer += $input->readListEnd(); } else { @@ -22709,14 +23186,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size660 = 0; - $_etype663 = 0; - $xfer += $input->readListBegin($_etype663, $_size660); - for ($_i664 = 0; $_i664 < $_size660; ++$_i664) + $_size669 = 0; + $_etype672 = 0; + $xfer += $input->readListBegin($_etype672, $_size669); + for ($_i673 = 0; $_i673 < $_size669; ++$_i673) { - $elem665 = null; - $xfer += $input->readString($elem665); - $this->filesAddedChecksum []= $elem665; + $elem674 = null; + $xfer += $input->readString($elem674); + $this->filesAddedChecksum []= $elem674; } $xfer += $input->readListEnd(); } else { @@ -22726,14 +23203,14 @@ class InsertEventRequestData { case 4: if ($ftype == TType::LST) { $this->subDirectoryList = array(); - $_size666 = 0; - $_etype669 = 0; - $xfer += $input->readListBegin($_etype669, $_size666); - for ($_i670 = 0; $_i670 < $_size666; ++$_i670) + $_size675 = 0; + $_etype678 = 0; + $xfer += $input->readListBegin($_etype678, $_size675); + for ($_i679 = 0; $_i679 < $_size675; ++$_i679) { - $elem671 = null; - $xfer += $input->readString($elem671); - $this->subDirectoryList []= $elem671; + $elem680 = null; + $xfer += $input->readString($elem680); + $this->subDirectoryList []= $elem680; } $xfer += $input->readListEnd(); } else { @@ -22766,9 +23243,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter672) + foreach ($this->filesAdded as $iter681) { - $xfer += $output->writeString($iter672); + $xfer += $output->writeString($iter681); } } $output->writeListEnd(); @@ -22783,9 +23260,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter673) + foreach ($this->filesAddedChecksum as 
$iter682) { - $xfer += $output->writeString($iter673); + $xfer += $output->writeString($iter682); } } $output->writeListEnd(); @@ -22800,9 +23277,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->subDirectoryList)); { - foreach ($this->subDirectoryList as $iter674) + foreach ($this->subDirectoryList as $iter683) { - $xfer += $output->writeString($iter674); + $xfer += $output->writeString($iter683); } } $output->writeListEnd(); @@ -23031,14 +23508,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size675 = 0; - $_etype678 = 0; - $xfer += $input->readListBegin($_etype678, $_size675); - for ($_i679 = 0; $_i679 < $_size675; ++$_i679) + $_size684 = 0; + $_etype687 = 0; + $xfer += $input->readListBegin($_etype687, $_size684); + for ($_i688 = 0; $_i688 < $_size684; ++$_i688) { - $elem680 = null; - $xfer += $input->readString($elem680); - $this->partitionVals []= $elem680; + $elem689 = null; + $xfer += $input->readString($elem689); + $this->partitionVals []= $elem689; } $xfer += $input->readListEnd(); } else { @@ -23096,9 +23573,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter681) + foreach ($this->partitionVals as $iter690) { - $xfer += $output->writeString($iter681); + $xfer += $output->writeString($iter690); } } $output->writeListEnd(); @@ -23309,14 +23786,14 @@ class WriteNotificationLogRequest { case 6: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size682 = 0; - $_etype685 = 0; - $xfer += $input->readListBegin($_etype685, $_size682); - for ($_i686 = 0; $_i686 < $_size682; ++$_i686) + $_size691 = 0; + $_etype694 = 0; + $xfer += $input->readListBegin($_etype694, $_size691); + for ($_i695 = 0; $_i695 < $_size691; ++$_i695) { - $elem687 = null; - $xfer += $input->readString($elem687); - $this->partitionVals []= $elem687; + $elem696 = null; + $xfer += 
$input->readString($elem696); + $this->partitionVals []= $elem696; } $xfer += $input->readListEnd(); } else { @@ -23372,9 +23849,9 @@ class WriteNotificationLogRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter688) + foreach ($this->partitionVals as $iter697) { - $xfer += $output->writeString($iter688); + $xfer += $output->writeString($iter697); } } $output->writeListEnd(); @@ -23602,18 +24079,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size689 = 0; - $_ktype690 = 0; - $_vtype691 = 0; - $xfer += $input->readMapBegin($_ktype690, $_vtype691, $_size689); - for ($_i693 = 0; $_i693 < $_size689; ++$_i693) + $_size698 = 0; + $_ktype699 = 0; + $_vtype700 = 0; + $xfer += $input->readMapBegin($_ktype699, $_vtype700, $_size698); + for ($_i702 = 0; $_i702 < $_size698; ++$_i702) { - $key694 = 0; - $val695 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key694); - $val695 = new \metastore\MetadataPpdResult(); - $xfer += $val695->read($input); - $this->metadata[$key694] = $val695; + $key703 = 0; + $val704 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key703); + $val704 = new \metastore\MetadataPpdResult(); + $xfer += $val704->read($input); + $this->metadata[$key703] = $val704; } $xfer += $input->readMapEnd(); } else { @@ -23648,10 +24125,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter696 => $viter697) + foreach ($this->metadata as $kiter705 => $viter706) { - $xfer += $output->writeI64($kiter696); - $xfer += $viter697->write($output); + $xfer += $output->writeI64($kiter705); + $xfer += $viter706->write($output); } } $output->writeMapEnd(); @@ -23753,14 +24230,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size698 = 0; - $_etype701 = 0; - 
$xfer += $input->readListBegin($_etype701, $_size698); - for ($_i702 = 0; $_i702 < $_size698; ++$_i702) + $_size707 = 0; + $_etype710 = 0; + $xfer += $input->readListBegin($_etype710, $_size707); + for ($_i711 = 0; $_i711 < $_size707; ++$_i711) { - $elem703 = null; - $xfer += $input->readI64($elem703); - $this->fileIds []= $elem703; + $elem712 = null; + $xfer += $input->readI64($elem712); + $this->fileIds []= $elem712; } $xfer += $input->readListEnd(); } else { @@ -23809,9 +24286,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter704) + foreach ($this->fileIds as $iter713) { - $xfer += $output->writeI64($iter704); + $xfer += $output->writeI64($iter713); } } $output->writeListEnd(); @@ -23905,17 +24382,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size705 = 0; - $_ktype706 = 0; - $_vtype707 = 0; - $xfer += $input->readMapBegin($_ktype706, $_vtype707, $_size705); - for ($_i709 = 0; $_i709 < $_size705; ++$_i709) + $_size714 = 0; + $_ktype715 = 0; + $_vtype716 = 0; + $xfer += $input->readMapBegin($_ktype715, $_vtype716, $_size714); + for ($_i718 = 0; $_i718 < $_size714; ++$_i718) { - $key710 = 0; - $val711 = ''; - $xfer += $input->readI64($key710); - $xfer += $input->readString($val711); - $this->metadata[$key710] = $val711; + $key719 = 0; + $val720 = ''; + $xfer += $input->readI64($key719); + $xfer += $input->readString($val720); + $this->metadata[$key719] = $val720; } $xfer += $input->readMapEnd(); } else { @@ -23950,10 +24427,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter712 => $viter713) + foreach ($this->metadata as $kiter721 => $viter722) { - $xfer += $output->writeI64($kiter712); - $xfer += $output->writeString($viter713); + $xfer += $output->writeI64($kiter721); + $xfer += $output->writeString($viter722); } } 
$output->writeMapEnd(); @@ -24022,14 +24499,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size714 = 0; - $_etype717 = 0; - $xfer += $input->readListBegin($_etype717, $_size714); - for ($_i718 = 0; $_i718 < $_size714; ++$_i718) + $_size723 = 0; + $_etype726 = 0; + $xfer += $input->readListBegin($_etype726, $_size723); + for ($_i727 = 0; $_i727 < $_size723; ++$_i727) { - $elem719 = null; - $xfer += $input->readI64($elem719); - $this->fileIds []= $elem719; + $elem728 = null; + $xfer += $input->readI64($elem728); + $this->fileIds []= $elem728; } $xfer += $input->readListEnd(); } else { @@ -24057,9 +24534,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter720) + foreach ($this->fileIds as $iter729) { - $xfer += $output->writeI64($iter720); + $xfer += $output->writeI64($iter729); } } $output->writeListEnd(); @@ -24199,14 +24676,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size721 = 0; - $_etype724 = 0; - $xfer += $input->readListBegin($_etype724, $_size721); - for ($_i725 = 0; $_i725 < $_size721; ++$_i725) + $_size730 = 0; + $_etype733 = 0; + $xfer += $input->readListBegin($_etype733, $_size730); + for ($_i734 = 0; $_i734 < $_size730; ++$_i734) { - $elem726 = null; - $xfer += $input->readI64($elem726); - $this->fileIds []= $elem726; + $elem735 = null; + $xfer += $input->readI64($elem735); + $this->fileIds []= $elem735; } $xfer += $input->readListEnd(); } else { @@ -24216,14 +24693,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size727 = 0; - $_etype730 = 0; - $xfer += $input->readListBegin($_etype730, $_size727); - for ($_i731 = 0; $_i731 < $_size727; ++$_i731) + $_size736 = 0; + $_etype739 = 0; + $xfer += $input->readListBegin($_etype739, $_size736); + for ($_i740 = 0; $_i740 < $_size736; ++$_i740) { - $elem732 = null; 
- $xfer += $input->readString($elem732); - $this->metadata []= $elem732; + $elem741 = null; + $xfer += $input->readString($elem741); + $this->metadata []= $elem741; } $xfer += $input->readListEnd(); } else { @@ -24258,9 +24735,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter733) + foreach ($this->fileIds as $iter742) { - $xfer += $output->writeI64($iter733); + $xfer += $output->writeI64($iter742); } } $output->writeListEnd(); @@ -24275,9 +24752,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter734) + foreach ($this->metadata as $iter743) { - $xfer += $output->writeString($iter734); + $xfer += $output->writeString($iter743); } } $output->writeListEnd(); @@ -24396,14 +24873,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size735 = 0; - $_etype738 = 0; - $xfer += $input->readListBegin($_etype738, $_size735); - for ($_i739 = 0; $_i739 < $_size735; ++$_i739) + $_size744 = 0; + $_etype747 = 0; + $xfer += $input->readListBegin($_etype747, $_size744); + for ($_i748 = 0; $_i748 < $_size744; ++$_i748) { - $elem740 = null; - $xfer += $input->readI64($elem740); - $this->fileIds []= $elem740; + $elem749 = null; + $xfer += $input->readI64($elem749); + $this->fileIds []= $elem749; } $xfer += $input->readListEnd(); } else { @@ -24431,9 +24908,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter741) + foreach ($this->fileIds as $iter750) { - $xfer += $output->writeI64($iter741); + $xfer += $output->writeI64($iter750); } } $output->writeListEnd(); @@ -24717,15 +25194,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size742 = 0; - $_etype745 = 0; - $xfer += $input->readListBegin($_etype745, $_size742); - for ($_i746 = 
0; $_i746 < $_size742; ++$_i746) + $_size751 = 0; + $_etype754 = 0; + $xfer += $input->readListBegin($_etype754, $_size751); + for ($_i755 = 0; $_i755 < $_size751; ++$_i755) { - $elem747 = null; - $elem747 = new \metastore\Function(); - $xfer += $elem747->read($input); - $this->functions []= $elem747; + $elem756 = null; + $elem756 = new \metastore\Function(); + $xfer += $elem756->read($input); + $this->functions []= $elem756; } $xfer += $input->readListEnd(); } else { @@ -24753,9 +25230,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter748) + foreach ($this->functions as $iter757) { - $xfer += $iter748->write($output); + $xfer += $iter757->write($output); } } $output->writeListEnd(); @@ -24819,14 +25296,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size749 = 0; - $_etype752 = 0; - $xfer += $input->readListBegin($_etype752, $_size749); - for ($_i753 = 0; $_i753 < $_size749; ++$_i753) + $_size758 = 0; + $_etype761 = 0; + $xfer += $input->readListBegin($_etype761, $_size758); + for ($_i762 = 0; $_i762 < $_size758; ++$_i762) { - $elem754 = null; - $xfer += $input->readI32($elem754); - $this->values []= $elem754; + $elem763 = null; + $xfer += $input->readI32($elem763); + $this->values []= $elem763; } $xfer += $input->readListEnd(); } else { @@ -24854,9 +25331,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter755) + foreach ($this->values as $iter764) { - $xfer += $output->writeI32($iter755); + $xfer += $output->writeI32($iter764); } } $output->writeListEnd(); @@ -25236,14 +25713,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size756 = 0; - $_etype759 = 0; - $xfer += $input->readListBegin($_etype759, $_size756); - for ($_i760 = 0; $_i760 < $_size756; ++$_i760) + $_size765 = 0; + $_etype768 = 0; + $xfer 
+= $input->readListBegin($_etype768, $_size765); + for ($_i769 = 0; $_i769 < $_size765; ++$_i769) { - $elem761 = null; - $xfer += $input->readString($elem761); - $this->tblNames []= $elem761; + $elem770 = null; + $xfer += $input->readString($elem770); + $this->tblNames []= $elem770; } $xfer += $input->readListEnd(); } else { @@ -25291,9 +25768,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter762) + foreach ($this->tblNames as $iter771) { - $xfer += $output->writeString($iter762); + $xfer += $output->writeString($iter771); } } $output->writeListEnd(); @@ -25371,15 +25848,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size772 = 0; + $_etype775 = 0; + $xfer += $input->readListBegin($_etype775, $_size772); + for ($_i776 = 0; $_i776 < $_size772; ++$_i776) { - $elem768 = null; - $elem768 = new \metastore\Table(); - $xfer += $elem768->read($input); - $this->tables []= $elem768; + $elem777 = null; + $elem777 = new \metastore\Table(); + $xfer += $elem777->read($input); + $this->tables []= $elem777; } $xfer += $input->readListEnd(); } else { @@ -25407,9 +25884,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter769) + foreach ($this->tables as $iter778) { - $xfer += $iter769->write($output); + $xfer += $iter778->write($output); } } $output->writeListEnd(); @@ -27055,15 +27532,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size770 = 0; - $_etype773 = 0; - $xfer += $input->readListBegin($_etype773, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size779 = 0; + $_etype782 = 0; + $xfer += $input->readListBegin($_etype782, $_size779); + for ($_i783 = 0; $_i783 < 
$_size779; ++$_i783) { - $elem775 = null; - $elem775 = new \metastore\WMPool(); - $xfer += $elem775->read($input); - $this->pools []= $elem775; + $elem784 = null; + $elem784 = new \metastore\WMPool(); + $xfer += $elem784->read($input); + $this->pools []= $elem784; } $xfer += $input->readListEnd(); } else { @@ -27073,15 +27550,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size776 = 0; - $_etype779 = 0; - $xfer += $input->readListBegin($_etype779, $_size776); - for ($_i780 = 0; $_i780 < $_size776; ++$_i780) + $_size785 = 0; + $_etype788 = 0; + $xfer += $input->readListBegin($_etype788, $_size785); + for ($_i789 = 0; $_i789 < $_size785; ++$_i789) { - $elem781 = null; - $elem781 = new \metastore\WMMapping(); - $xfer += $elem781->read($input); - $this->mappings []= $elem781; + $elem790 = null; + $elem790 = new \metastore\WMMapping(); + $xfer += $elem790->read($input); + $this->mappings []= $elem790; } $xfer += $input->readListEnd(); } else { @@ -27091,15 +27568,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size782 = 0; - $_etype785 = 0; - $xfer += $input->readListBegin($_etype785, $_size782); - for ($_i786 = 0; $_i786 < $_size782; ++$_i786) + $_size791 = 0; + $_etype794 = 0; + $xfer += $input->readListBegin($_etype794, $_size791); + for ($_i795 = 0; $_i795 < $_size791; ++$_i795) { - $elem787 = null; - $elem787 = new \metastore\WMTrigger(); - $xfer += $elem787->read($input); - $this->triggers []= $elem787; + $elem796 = null; + $elem796 = new \metastore\WMTrigger(); + $xfer += $elem796->read($input); + $this->triggers []= $elem796; } $xfer += $input->readListEnd(); } else { @@ -27109,15 +27586,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size788 = 0; - $_etype791 = 0; - $xfer += $input->readListBegin($_etype791, $_size788); - for ($_i792 = 0; $_i792 < $_size788; ++$_i792) + $_size797 = 0; + $_etype800 = 0; + 
$xfer += $input->readListBegin($_etype800, $_size797); + for ($_i801 = 0; $_i801 < $_size797; ++$_i801) { - $elem793 = null; - $elem793 = new \metastore\WMPoolTrigger(); - $xfer += $elem793->read($input); - $this->poolTriggers []= $elem793; + $elem802 = null; + $elem802 = new \metastore\WMPoolTrigger(); + $xfer += $elem802->read($input); + $this->poolTriggers []= $elem802; } $xfer += $input->readListEnd(); } else { @@ -27153,9 +27630,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter794) + foreach ($this->pools as $iter803) { - $xfer += $iter794->write($output); + $xfer += $iter803->write($output); } } $output->writeListEnd(); @@ -27170,9 +27647,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter795) + foreach ($this->mappings as $iter804) { - $xfer += $iter795->write($output); + $xfer += $iter804->write($output); } } $output->writeListEnd(); @@ -27187,9 +27664,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter796) + foreach ($this->triggers as $iter805) { - $xfer += $iter796->write($output); + $xfer += $iter805->write($output); } } $output->writeListEnd(); @@ -27204,9 +27681,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter797) + foreach ($this->poolTriggers as $iter806) { - $xfer += $iter797->write($output); + $xfer += $iter806->write($output); } } $output->writeListEnd(); @@ -27759,15 +28236,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size798 = 0; - $_etype801 = 0; - $xfer += $input->readListBegin($_etype801, $_size798); - for ($_i802 = 0; $_i802 < $_size798; ++$_i802) + $_size807 = 0; + $_etype810 = 0; + $xfer += $input->readListBegin($_etype810, 
$_size807); + for ($_i811 = 0; $_i811 < $_size807; ++$_i811) { - $elem803 = null; - $elem803 = new \metastore\WMResourcePlan(); - $xfer += $elem803->read($input); - $this->resourcePlans []= $elem803; + $elem812 = null; + $elem812 = new \metastore\WMResourcePlan(); + $xfer += $elem812->read($input); + $this->resourcePlans []= $elem812; } $xfer += $input->readListEnd(); } else { @@ -27795,9 +28272,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter804) + foreach ($this->resourcePlans as $iter813) { - $xfer += $iter804->write($output); + $xfer += $iter813->write($output); } } $output->writeListEnd(); @@ -28203,14 +28680,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size805 = 0; - $_etype808 = 0; - $xfer += $input->readListBegin($_etype808, $_size805); - for ($_i809 = 0; $_i809 < $_size805; ++$_i809) + $_size814 = 0; + $_etype817 = 0; + $xfer += $input->readListBegin($_etype817, $_size814); + for ($_i818 = 0; $_i818 < $_size814; ++$_i818) { - $elem810 = null; - $xfer += $input->readString($elem810); - $this->errors []= $elem810; + $elem819 = null; + $xfer += $input->readString($elem819); + $this->errors []= $elem819; } $xfer += $input->readListEnd(); } else { @@ -28220,14 +28697,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size811 = 0; - $_etype814 = 0; - $xfer += $input->readListBegin($_etype814, $_size811); - for ($_i815 = 0; $_i815 < $_size811; ++$_i815) + $_size820 = 0; + $_etype823 = 0; + $xfer += $input->readListBegin($_etype823, $_size820); + for ($_i824 = 0; $_i824 < $_size820; ++$_i824) { - $elem816 = null; - $xfer += $input->readString($elem816); - $this->warnings []= $elem816; + $elem825 = null; + $xfer += $input->readString($elem825); + $this->warnings []= $elem825; } $xfer += $input->readListEnd(); } else { @@ -28255,9 
+28732,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter817) + foreach ($this->errors as $iter826) { - $xfer += $output->writeString($iter817); + $xfer += $output->writeString($iter826); } } $output->writeListEnd(); @@ -28272,9 +28749,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter818) + foreach ($this->warnings as $iter827) { - $xfer += $output->writeString($iter818); + $xfer += $output->writeString($iter827); } } $output->writeListEnd(); @@ -28947,15 +29424,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size819 = 0; - $_etype822 = 0; - $xfer += $input->readListBegin($_etype822, $_size819); - for ($_i823 = 0; $_i823 < $_size819; ++$_i823) + $_size828 = 0; + $_etype831 = 0; + $xfer += $input->readListBegin($_etype831, $_size828); + for ($_i832 = 0; $_i832 < $_size828; ++$_i832) { - $elem824 = null; - $elem824 = new \metastore\WMTrigger(); - $xfer += $elem824->read($input); - $this->triggers []= $elem824; + $elem833 = null; + $elem833 = new \metastore\WMTrigger(); + $xfer += $elem833->read($input); + $this->triggers []= $elem833; } $xfer += $input->readListEnd(); } else { @@ -28983,9 +29460,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter825) + foreach ($this->triggers as $iter834) { - $xfer += $iter825->write($output); + $xfer += $iter834->write($output); } } $output->writeListEnd(); @@ -30569,15 +31046,15 @@ class SchemaVersion { case 4: if ($ftype == TType::LST) { $this->cols = array(); - $_size826 = 0; - $_etype829 = 0; - $xfer += $input->readListBegin($_etype829, $_size826); - for ($_i830 = 0; $_i830 < $_size826; ++$_i830) + $_size835 = 0; + $_etype838 = 0; + $xfer += 
$input->readListBegin($_etype838, $_size835); + for ($_i839 = 0; $_i839 < $_size835; ++$_i839) { - $elem831 = null; - $elem831 = new \metastore\FieldSchema(); - $xfer += $elem831->read($input); - $this->cols []= $elem831; + $elem840 = null; + $elem840 = new \metastore\FieldSchema(); + $xfer += $elem840->read($input); + $this->cols []= $elem840; } $xfer += $input->readListEnd(); } else { @@ -30666,9 +31143,9 @@ class SchemaVersion { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter832) + foreach ($this->cols as $iter841) { - $xfer += $iter832->write($output); + $xfer += $iter841->write($output); } } $output->writeListEnd(); @@ -30990,15 +31467,15 @@ class FindSchemasByColsResp { case 1: if ($ftype == TType::LST) { $this->schemaVersions = array(); - $_size833 = 0; - $_etype836 = 0; - $xfer += $input->readListBegin($_etype836, $_size833); - for ($_i837 = 0; $_i837 < $_size833; ++$_i837) + $_size842 = 0; + $_etype845 = 0; + $xfer += $input->readListBegin($_etype845, $_size842); + for ($_i846 = 0; $_i846 < $_size842; ++$_i846) { - $elem838 = null; - $elem838 = new \metastore\SchemaVersionDescriptor(); - $xfer += $elem838->read($input); - $this->schemaVersions []= $elem838; + $elem847 = null; + $elem847 = new \metastore\SchemaVersionDescriptor(); + $xfer += $elem847->read($input); + $this->schemaVersions []= $elem847; } $xfer += $input->readListEnd(); } else { @@ -31026,9 +31503,9 @@ class FindSchemasByColsResp { { $output->writeListBegin(TType::STRUCT, count($this->schemaVersions)); { - foreach ($this->schemaVersions as $iter839) + foreach ($this->schemaVersions as $iter848) { - $xfer += $iter839->write($output); + $xfer += $iter848->write($output); } } $output->writeListEnd(); @@ -31681,15 +32158,15 @@ class AlterPartitionsRequest { case 4: if ($ftype == TType::LST) { $this->partitions = array(); - $_size840 = 0; - $_etype843 = 0; - $xfer += $input->readListBegin($_etype843, $_size840); - for ($_i844 = 0; $_i844 < 
$_size840; ++$_i844) + $_size849 = 0; + $_etype852 = 0; + $xfer += $input->readListBegin($_etype852, $_size849); + for ($_i853 = 0; $_i853 < $_size849; ++$_i853) { - $elem845 = null; - $elem845 = new \metastore\Partition(); - $xfer += $elem845->read($input); - $this->partitions []= $elem845; + $elem854 = null; + $elem854 = new \metastore\Partition(); + $xfer += $elem854->read($input); + $this->partitions []= $elem854; } $xfer += $input->readListEnd(); } else { @@ -31754,9 +32231,9 @@ class AlterPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter846) + foreach ($this->partitions as $iter855) { - $xfer += $iter846->write($output); + $xfer += $iter855->write($output); } } $output->writeListEnd(); @@ -31965,14 +32442,14 @@ class RenamePartitionRequest { case 4: if ($ftype == TType::LST) { $this->partVals = array(); - $_size847 = 0; - $_etype850 = 0; - $xfer += $input->readListBegin($_etype850, $_size847); - for ($_i851 = 0; $_i851 < $_size847; ++$_i851) + $_size856 = 0; + $_etype859 = 0; + $xfer += $input->readListBegin($_etype859, $_size856); + for ($_i860 = 0; $_i860 < $_size856; ++$_i860) { - $elem852 = null; - $xfer += $input->readString($elem852); - $this->partVals []= $elem852; + $elem861 = null; + $xfer += $input->readString($elem861); + $this->partVals []= $elem861; } $xfer += $input->readListEnd(); } else { @@ -32030,9 +32507,9 @@ class RenamePartitionRequest { { $output->writeListBegin(TType::STRING, count($this->partVals)); { - foreach ($this->partVals as $iter853) + foreach ($this->partVals as $iter862) { - $xfer += $output->writeString($iter853); + $xfer += $output->writeString($iter862); } } $output->writeListEnd(); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index a595732f04..fb7763d6c8 100755 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -130,6 +130,9 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') print(' SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req)') print(' SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req)') + print(' SetBasicStatsResponse update_table_basic_statistics_req(SetBasicStatsRequest req)') + print(' SetBasicStatsResponse update_partition_basic_statistics_req(SetBasicStatsRequest req)') + print(' InvalidateColumnStatsResponse invalidate_all_column_statistics_req(InvalidateColumnStatsRequest req)') print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)') @@ -943,6 +946,24 @@ elif cmd == 'update_partition_column_statistics_req': sys.exit(1) pp.pprint(client.update_partition_column_statistics_req(eval(args[0]),)) +elif cmd == 'update_table_basic_statistics_req': + if len(args) != 1: + print('update_table_basic_statistics_req requires 1 args') + sys.exit(1) + pp.pprint(client.update_table_basic_statistics_req(eval(args[0]),)) + +elif cmd == 'update_partition_basic_statistics_req': + if len(args) != 1: + print('update_partition_basic_statistics_req requires 1 args') + sys.exit(1) + pp.pprint(client.update_partition_basic_statistics_req(eval(args[0]),)) + +elif cmd == 'invalidate_all_column_statistics_req': + if len(args) != 1: + print('invalidate_all_column_statistics_req requires 1 args') + sys.exit(1) + 
pp.pprint(client.invalidate_all_column_statistics_req(eval(args[0]),)) + elif cmd == 'get_table_column_statistics': if len(args) != 3: print('get_table_column_statistics requires 3 args') diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index d098dba100..75e248f30a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -895,6 +895,27 @@ def update_partition_column_statistics_req(self, req): """ pass + def update_table_basic_statistics_req(self, req): + """ + Parameters: + - req + """ + pass + + def update_partition_basic_statistics_req(self, req): + """ + Parameters: + - req + """ + pass + + def invalidate_all_column_statistics_req(self, req): + """ + Parameters: + - req + """ + pass + def get_table_column_statistics(self, db_name, tbl_name, col_name): """ Parameters: @@ -5636,6 +5657,123 @@ def recv_update_partition_column_statistics_req(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result") + def update_table_basic_statistics_req(self, req): + """ + Parameters: + - req + """ + self.send_update_table_basic_statistics_req(req) + return self.recv_update_table_basic_statistics_req() + + def send_update_table_basic_statistics_req(self, req): + self._oprot.writeMessageBegin('update_table_basic_statistics_req', TMessageType.CALL, self._seqid) + args = update_table_basic_statistics_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_table_basic_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = 
TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_table_basic_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "update_table_basic_statistics_req failed: unknown result") + + def update_partition_basic_statistics_req(self, req): + """ + Parameters: + - req + """ + self.send_update_partition_basic_statistics_req(req) + return self.recv_update_partition_basic_statistics_req() + + def send_update_partition_basic_statistics_req(self, req): + self._oprot.writeMessageBegin('update_partition_basic_statistics_req', TMessageType.CALL, self._seqid) + args = update_partition_basic_statistics_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_partition_basic_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_partition_basic_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "update_partition_basic_statistics_req failed: unknown result") + + def invalidate_all_column_statistics_req(self, req): + """ + Parameters: + - req + """ + self.send_invalidate_all_column_statistics_req(req) + return 
self.recv_invalidate_all_column_statistics_req() + + def send_invalidate_all_column_statistics_req(self, req): + self._oprot.writeMessageBegin('invalidate_all_column_statistics_req', TMessageType.CALL, self._seqid) + args = invalidate_all_column_statistics_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_invalidate_all_column_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = invalidate_all_column_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "invalidate_all_column_statistics_req failed: unknown result") + def get_table_column_statistics(self, db_name, tbl_name, col_name): """ Parameters: @@ -9407,6 +9545,9 @@ def __init__(self, handler): self._processMap["update_partition_column_statistics"] = Processor.process_update_partition_column_statistics self._processMap["update_table_column_statistics_req"] = Processor.process_update_table_column_statistics_req self._processMap["update_partition_column_statistics_req"] = Processor.process_update_partition_column_statistics_req + self._processMap["update_table_basic_statistics_req"] = Processor.process_update_table_basic_statistics_req + self._processMap["update_partition_basic_statistics_req"] = Processor.process_update_partition_basic_statistics_req + self._processMap["invalidate_all_column_statistics_req"] = Processor.process_invalidate_all_column_statistics_req self._processMap["get_table_column_statistics"] = 
Processor.process_get_table_column_statistics self._processMap["get_partition_column_statistics"] = Processor.process_get_partition_column_statistics self._processMap["get_table_statistics_req"] = Processor.process_get_table_statistics_req @@ -12271,6 +12412,99 @@ def process_update_partition_column_statistics_req(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_update_table_basic_statistics_req(self, seqid, iprot, oprot): + args = update_table_basic_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_table_basic_statistics_req_result() + try: + result.success = self._handler.update_table_basic_statistics_req(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("update_table_basic_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_partition_basic_statistics_req(self, seqid, iprot, oprot): + args = update_partition_basic_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_partition_basic_statistics_req_result() + try: + result.success = self._handler.update_partition_basic_statistics_req(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = 
o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("update_partition_basic_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_invalidate_all_column_statistics_req(self, seqid, iprot, oprot): + args = invalidate_all_column_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = invalidate_all_column_statistics_req_result() + try: + result.success = self._handler.invalidate_all_column_statistics_req(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("invalidate_all_column_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_table_column_statistics(self, seqid, iprot, oprot): args = get_table_column_statistics_args() args.read(iprot) @@ -16468,10 +16702,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype854, _size851) = iprot.readListBegin() - for _i855 in 
xrange(_size851): - _elem856 = iprot.readString() - self.success.append(_elem856) + (_etype863, _size860) = iprot.readListBegin() + for _i864 in xrange(_size860): + _elem865 = iprot.readString() + self.success.append(_elem865) iprot.readListEnd() else: iprot.skip(ftype) @@ -16494,8 +16728,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter857 in self.success: - oprot.writeString(iter857) + for iter866 in self.success: + oprot.writeString(iter866) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16600,10 +16834,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype861, _size858) = iprot.readListBegin() - for _i862 in xrange(_size858): - _elem863 = iprot.readString() - self.success.append(_elem863) + (_etype870, _size867) = iprot.readListBegin() + for _i871 in xrange(_size867): + _elem872 = iprot.readString() + self.success.append(_elem872) iprot.readListEnd() else: iprot.skip(ftype) @@ -16626,8 +16860,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter864 in self.success: - oprot.writeString(iter864) + for iter873 in self.success: + oprot.writeString(iter873) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17397,12 +17631,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype866, _vtype867, _size865 ) = iprot.readMapBegin() - for _i869 in xrange(_size865): - _key870 = iprot.readString() - _val871 = Type() - _val871.read(iprot) - self.success[_key870] = _val871 + (_ktype875, _vtype876, _size874 ) = iprot.readMapBegin() + for _i878 in xrange(_size874): + _key879 = iprot.readString() + _val880 = Type() + _val880.read(iprot) + self.success[_key879] = _val880 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17425,9 
+17659,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter872,viter873 in self.success.items(): - oprot.writeString(kiter872) - viter873.write(oprot) + for kiter881,viter882 in self.success.items(): + oprot.writeString(kiter881) + viter882.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -17570,11 +17804,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype877, _size874) = iprot.readListBegin() - for _i878 in xrange(_size874): - _elem879 = FieldSchema() - _elem879.read(iprot) - self.success.append(_elem879) + (_etype886, _size883) = iprot.readListBegin() + for _i887 in xrange(_size883): + _elem888 = FieldSchema() + _elem888.read(iprot) + self.success.append(_elem888) iprot.readListEnd() else: iprot.skip(ftype) @@ -17609,8 +17843,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter880 in self.success: - iter880.write(oprot) + for iter889 in self.success: + iter889.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17777,11 +18011,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype884, _size881) = iprot.readListBegin() - for _i885 in xrange(_size881): - _elem886 = FieldSchema() - _elem886.read(iprot) - self.success.append(_elem886) + (_etype893, _size890) = iprot.readListBegin() + for _i894 in xrange(_size890): + _elem895 = FieldSchema() + _elem895.read(iprot) + self.success.append(_elem895) iprot.readListEnd() else: iprot.skip(ftype) @@ -17816,8 +18050,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter887 in self.success: - iter887.write(oprot) + for iter896 
in self.success: + iter896.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17970,11 +18204,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype891, _size888) = iprot.readListBegin() - for _i892 in xrange(_size888): - _elem893 = FieldSchema() - _elem893.read(iprot) - self.success.append(_elem893) + (_etype900, _size897) = iprot.readListBegin() + for _i901 in xrange(_size897): + _elem902 = FieldSchema() + _elem902.read(iprot) + self.success.append(_elem902) iprot.readListEnd() else: iprot.skip(ftype) @@ -18009,8 +18243,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter894 in self.success: - iter894.write(oprot) + for iter903 in self.success: + iter903.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18177,11 +18411,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype898, _size895) = iprot.readListBegin() - for _i899 in xrange(_size895): - _elem900 = FieldSchema() - _elem900.read(iprot) - self.success.append(_elem900) + (_etype907, _size904) = iprot.readListBegin() + for _i908 in xrange(_size904): + _elem909 = FieldSchema() + _elem909.read(iprot) + self.success.append(_elem909) iprot.readListEnd() else: iprot.skip(ftype) @@ -18216,8 +18450,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter901 in self.success: - iter901.write(oprot) + for iter910 in self.success: + iter910.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18670,66 +18904,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype905, _size902) = iprot.readListBegin() - for _i906 in xrange(_size902): - _elem907 = SQLPrimaryKey() - 
_elem907.read(iprot) - self.primaryKeys.append(_elem907) + (_etype914, _size911) = iprot.readListBegin() + for _i915 in xrange(_size911): + _elem916 = SQLPrimaryKey() + _elem916.read(iprot) + self.primaryKeys.append(_elem916) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype911, _size908) = iprot.readListBegin() - for _i912 in xrange(_size908): - _elem913 = SQLForeignKey() - _elem913.read(iprot) - self.foreignKeys.append(_elem913) + (_etype920, _size917) = iprot.readListBegin() + for _i921 in xrange(_size917): + _elem922 = SQLForeignKey() + _elem922.read(iprot) + self.foreignKeys.append(_elem922) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype917, _size914) = iprot.readListBegin() - for _i918 in xrange(_size914): - _elem919 = SQLUniqueConstraint() - _elem919.read(iprot) - self.uniqueConstraints.append(_elem919) + (_etype926, _size923) = iprot.readListBegin() + for _i927 in xrange(_size923): + _elem928 = SQLUniqueConstraint() + _elem928.read(iprot) + self.uniqueConstraints.append(_elem928) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype923, _size920) = iprot.readListBegin() - for _i924 in xrange(_size920): - _elem925 = SQLNotNullConstraint() - _elem925.read(iprot) - self.notNullConstraints.append(_elem925) + (_etype932, _size929) = iprot.readListBegin() + for _i933 in xrange(_size929): + _elem934 = SQLNotNullConstraint() + _elem934.read(iprot) + self.notNullConstraints.append(_elem934) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype929, _size926) = iprot.readListBegin() - for _i930 in xrange(_size926): - _elem931 = SQLDefaultConstraint() - _elem931.read(iprot) - self.defaultConstraints.append(_elem931) + (_etype938, _size935) = iprot.readListBegin() + for _i939 in xrange(_size935): + 
_elem940 = SQLDefaultConstraint() + _elem940.read(iprot) + self.defaultConstraints.append(_elem940) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype935, _size932) = iprot.readListBegin() - for _i936 in xrange(_size932): - _elem937 = SQLCheckConstraint() - _elem937.read(iprot) - self.checkConstraints.append(_elem937) + (_etype944, _size941) = iprot.readListBegin() + for _i945 in xrange(_size941): + _elem946 = SQLCheckConstraint() + _elem946.read(iprot) + self.checkConstraints.append(_elem946) iprot.readListEnd() else: iprot.skip(ftype) @@ -18750,43 +18984,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter938 in self.primaryKeys: - iter938.write(oprot) + for iter947 in self.primaryKeys: + iter947.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter939 in self.foreignKeys: - iter939.write(oprot) + for iter948 in self.foreignKeys: + iter948.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter940 in self.uniqueConstraints: - iter940.write(oprot) + for iter949 in self.uniqueConstraints: + iter949.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter941 in self.notNullConstraints: - iter941.write(oprot) + for iter950 in self.notNullConstraints: + iter950.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: 
oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter942 in self.defaultConstraints: - iter942.write(oprot) + for iter951 in self.defaultConstraints: + iter951.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter943 in self.checkConstraints: - iter943.write(oprot) + for iter952 in self.checkConstraints: + iter952.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20346,10 +20580,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype947, _size944) = iprot.readListBegin() - for _i948 in xrange(_size944): - _elem949 = iprot.readString() - self.partNames.append(_elem949) + (_etype956, _size953) = iprot.readListBegin() + for _i957 in xrange(_size953): + _elem958 = iprot.readString() + self.partNames.append(_elem958) iprot.readListEnd() else: iprot.skip(ftype) @@ -20374,8 +20608,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter950 in self.partNames: - oprot.writeString(iter950) + for iter959 in self.partNames: + oprot.writeString(iter959) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20720,10 +20954,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype954, _size951) = iprot.readListBegin() - for _i955 in xrange(_size951): - _elem956 = iprot.readString() - self.success.append(_elem956) + (_etype963, _size960) = iprot.readListBegin() + for _i964 in xrange(_size960): + _elem965 = iprot.readString() + self.success.append(_elem965) iprot.readListEnd() else: iprot.skip(ftype) @@ -20746,8 +20980,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter957 in self.success: - oprot.writeString(iter957) + for iter966 in self.success: + oprot.writeString(iter966) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20897,10 +21131,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype961, _size958) = iprot.readListBegin() - for _i962 in xrange(_size958): - _elem963 = iprot.readString() - self.success.append(_elem963) + (_etype970, _size967) = iprot.readListBegin() + for _i971 in xrange(_size967): + _elem972 = iprot.readString() + self.success.append(_elem972) iprot.readListEnd() else: iprot.skip(ftype) @@ -20923,8 +21157,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter964 in self.success: - oprot.writeString(iter964) + for iter973 in self.success: + oprot.writeString(iter973) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21048,10 +21282,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype968, _size965) = iprot.readListBegin() - for _i969 in xrange(_size965): - _elem970 = iprot.readString() - self.success.append(_elem970) + (_etype977, _size974) = iprot.readListBegin() + for _i978 in xrange(_size974): + _elem979 = iprot.readString() + self.success.append(_elem979) iprot.readListEnd() else: iprot.skip(ftype) @@ -21074,8 +21308,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter971 in self.success: - oprot.writeString(iter971) + for iter980 in self.success: + oprot.writeString(iter980) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21148,10 +21382,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types 
= [] - (_etype975, _size972) = iprot.readListBegin() - for _i976 in xrange(_size972): - _elem977 = iprot.readString() - self.tbl_types.append(_elem977) + (_etype984, _size981) = iprot.readListBegin() + for _i985 in xrange(_size981): + _elem986 = iprot.readString() + self.tbl_types.append(_elem986) iprot.readListEnd() else: iprot.skip(ftype) @@ -21176,8 +21410,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter978 in self.tbl_types: - oprot.writeString(iter978) + for iter987 in self.tbl_types: + oprot.writeString(iter987) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21233,11 +21467,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype982, _size979) = iprot.readListBegin() - for _i983 in xrange(_size979): - _elem984 = TableMeta() - _elem984.read(iprot) - self.success.append(_elem984) + (_etype991, _size988) = iprot.readListBegin() + for _i992 in xrange(_size988): + _elem993 = TableMeta() + _elem993.read(iprot) + self.success.append(_elem993) iprot.readListEnd() else: iprot.skip(ftype) @@ -21260,8 +21494,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter985 in self.success: - iter985.write(oprot) + for iter994 in self.success: + iter994.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21385,10 +21619,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype989, _size986) = iprot.readListBegin() - for _i990 in xrange(_size986): - _elem991 = iprot.readString() - self.success.append(_elem991) + (_etype998, _size995) = iprot.readListBegin() + for _i999 in xrange(_size995): + _elem1000 = iprot.readString() + self.success.append(_elem1000) iprot.readListEnd() else: iprot.skip(ftype) @@ -21411,8 
+21645,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter992 in self.success: - oprot.writeString(iter992) + for iter1001 in self.success: + oprot.writeString(iter1001) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21648,10 +21882,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype996, _size993) = iprot.readListBegin() - for _i997 in xrange(_size993): - _elem998 = iprot.readString() - self.tbl_names.append(_elem998) + (_etype1005, _size1002) = iprot.readListBegin() + for _i1006 in xrange(_size1002): + _elem1007 = iprot.readString() + self.tbl_names.append(_elem1007) iprot.readListEnd() else: iprot.skip(ftype) @@ -21672,8 +21906,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter999 in self.tbl_names: - oprot.writeString(iter999) + for iter1008 in self.tbl_names: + oprot.writeString(iter1008) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21725,11 +21959,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1003, _size1000) = iprot.readListBegin() - for _i1004 in xrange(_size1000): - _elem1005 = Table() - _elem1005.read(iprot) - self.success.append(_elem1005) + (_etype1012, _size1009) = iprot.readListBegin() + for _i1013 in xrange(_size1009): + _elem1014 = Table() + _elem1014.read(iprot) + self.success.append(_elem1014) iprot.readListEnd() else: iprot.skip(ftype) @@ -21746,8 +21980,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1006 in self.success: - iter1006.write(oprot) + for iter1015 in self.success: + iter1015.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() 
oprot.writeFieldStop() @@ -22615,10 +22849,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1010, _size1007) = iprot.readListBegin() - for _i1011 in xrange(_size1007): - _elem1012 = iprot.readString() - self.success.append(_elem1012) + (_etype1019, _size1016) = iprot.readListBegin() + for _i1020 in xrange(_size1016): + _elem1021 = iprot.readString() + self.success.append(_elem1021) iprot.readListEnd() else: iprot.skip(ftype) @@ -22653,8 +22887,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1013 in self.success: - oprot.writeString(iter1013) + for iter1022 in self.success: + oprot.writeString(iter1022) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23783,11 +24017,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1017, _size1014) = iprot.readListBegin() - for _i1018 in xrange(_size1014): - _elem1019 = Partition() - _elem1019.read(iprot) - self.new_parts.append(_elem1019) + (_etype1026, _size1023) = iprot.readListBegin() + for _i1027 in xrange(_size1023): + _elem1028 = Partition() + _elem1028.read(iprot) + self.new_parts.append(_elem1028) iprot.readListEnd() else: iprot.skip(ftype) @@ -23804,8 +24038,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1020 in self.new_parts: - iter1020.write(oprot) + for iter1029 in self.new_parts: + iter1029.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23963,11 +24197,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1024, _size1021) = iprot.readListBegin() - for _i1025 in xrange(_size1021): - _elem1026 = PartitionSpec() - _elem1026.read(iprot) - self.new_parts.append(_elem1026) + (_etype1033, 
_size1030) = iprot.readListBegin() + for _i1034 in xrange(_size1030): + _elem1035 = PartitionSpec() + _elem1035.read(iprot) + self.new_parts.append(_elem1035) iprot.readListEnd() else: iprot.skip(ftype) @@ -23984,8 +24218,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1027 in self.new_parts: - iter1027.write(oprot) + for iter1036 in self.new_parts: + iter1036.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24159,10 +24393,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1031, _size1028) = iprot.readListBegin() - for _i1032 in xrange(_size1028): - _elem1033 = iprot.readString() - self.part_vals.append(_elem1033) + (_etype1040, _size1037) = iprot.readListBegin() + for _i1041 in xrange(_size1037): + _elem1042 = iprot.readString() + self.part_vals.append(_elem1042) iprot.readListEnd() else: iprot.skip(ftype) @@ -24187,8 +24421,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1034 in self.part_vals: - oprot.writeString(iter1034) + for iter1043 in self.part_vals: + oprot.writeString(iter1043) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24541,10 +24775,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1038, _size1035) = iprot.readListBegin() - for _i1039 in xrange(_size1035): - _elem1040 = iprot.readString() - self.part_vals.append(_elem1040) + (_etype1047, _size1044) = iprot.readListBegin() + for _i1048 in xrange(_size1044): + _elem1049 = iprot.readString() + self.part_vals.append(_elem1049) iprot.readListEnd() else: iprot.skip(ftype) @@ -24575,8 +24809,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) 
oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1041 in self.part_vals: - oprot.writeString(iter1041) + for iter1050 in self.part_vals: + oprot.writeString(iter1050) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -25171,10 +25405,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1045, _size1042) = iprot.readListBegin() - for _i1046 in xrange(_size1042): - _elem1047 = iprot.readString() - self.part_vals.append(_elem1047) + (_etype1054, _size1051) = iprot.readListBegin() + for _i1055 in xrange(_size1051): + _elem1056 = iprot.readString() + self.part_vals.append(_elem1056) iprot.readListEnd() else: iprot.skip(ftype) @@ -25204,8 +25438,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1048 in self.part_vals: - oprot.writeString(iter1048) + for iter1057 in self.part_vals: + oprot.writeString(iter1057) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -25378,10 +25612,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1052, _size1049) = iprot.readListBegin() - for _i1053 in xrange(_size1049): - _elem1054 = iprot.readString() - self.part_vals.append(_elem1054) + (_etype1061, _size1058) = iprot.readListBegin() + for _i1062 in xrange(_size1058): + _elem1063 = iprot.readString() + self.part_vals.append(_elem1063) iprot.readListEnd() else: iprot.skip(ftype) @@ -25417,8 +25651,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1055 in self.part_vals: - oprot.writeString(iter1055) + for iter1064 in self.part_vals: + oprot.writeString(iter1064) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -26155,10 +26389,10 @@ def 
read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1059, _size1056) = iprot.readListBegin() - for _i1060 in xrange(_size1056): - _elem1061 = iprot.readString() - self.part_vals.append(_elem1061) + (_etype1068, _size1065) = iprot.readListBegin() + for _i1069 in xrange(_size1065): + _elem1070 = iprot.readString() + self.part_vals.append(_elem1070) iprot.readListEnd() else: iprot.skip(ftype) @@ -26183,8 +26417,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1062 in self.part_vals: - oprot.writeString(iter1062) + for iter1071 in self.part_vals: + oprot.writeString(iter1071) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26343,11 +26577,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1064, _vtype1065, _size1063 ) = iprot.readMapBegin() - for _i1067 in xrange(_size1063): - _key1068 = iprot.readString() - _val1069 = iprot.readString() - self.partitionSpecs[_key1068] = _val1069 + (_ktype1073, _vtype1074, _size1072 ) = iprot.readMapBegin() + for _i1076 in xrange(_size1072): + _key1077 = iprot.readString() + _val1078 = iprot.readString() + self.partitionSpecs[_key1077] = _val1078 iprot.readMapEnd() else: iprot.skip(ftype) @@ -26384,9 +26618,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1070,viter1071 in self.partitionSpecs.items(): - oprot.writeString(kiter1070) - oprot.writeString(viter1071) + for kiter1079,viter1080 in self.partitionSpecs.items(): + oprot.writeString(kiter1079) + oprot.writeString(viter1080) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -26591,11 +26825,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = 
{} - (_ktype1073, _vtype1074, _size1072 ) = iprot.readMapBegin() - for _i1076 in xrange(_size1072): - _key1077 = iprot.readString() - _val1078 = iprot.readString() - self.partitionSpecs[_key1077] = _val1078 + (_ktype1082, _vtype1083, _size1081 ) = iprot.readMapBegin() + for _i1085 in xrange(_size1081): + _key1086 = iprot.readString() + _val1087 = iprot.readString() + self.partitionSpecs[_key1086] = _val1087 iprot.readMapEnd() else: iprot.skip(ftype) @@ -26632,9 +26866,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1079,viter1080 in self.partitionSpecs.items(): - oprot.writeString(kiter1079) - oprot.writeString(viter1080) + for kiter1088,viter1089 in self.partitionSpecs.items(): + oprot.writeString(kiter1088) + oprot.writeString(viter1089) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -26717,11 +26951,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1084, _size1081) = iprot.readListBegin() - for _i1085 in xrange(_size1081): - _elem1086 = Partition() - _elem1086.read(iprot) - self.success.append(_elem1086) + (_etype1093, _size1090) = iprot.readListBegin() + for _i1094 in xrange(_size1090): + _elem1095 = Partition() + _elem1095.read(iprot) + self.success.append(_elem1095) iprot.readListEnd() else: iprot.skip(ftype) @@ -26762,8 +26996,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1087 in self.success: - iter1087.write(oprot) + for iter1096 in self.success: + iter1096.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26857,10 +27091,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1091, _size1088) = iprot.readListBegin() - for _i1092 in 
xrange(_size1088): - _elem1093 = iprot.readString() - self.part_vals.append(_elem1093) + (_etype1100, _size1097) = iprot.readListBegin() + for _i1101 in xrange(_size1097): + _elem1102 = iprot.readString() + self.part_vals.append(_elem1102) iprot.readListEnd() else: iprot.skip(ftype) @@ -26872,10 +27106,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1097, _size1094) = iprot.readListBegin() - for _i1098 in xrange(_size1094): - _elem1099 = iprot.readString() - self.group_names.append(_elem1099) + (_etype1106, _size1103) = iprot.readListBegin() + for _i1107 in xrange(_size1103): + _elem1108 = iprot.readString() + self.group_names.append(_elem1108) iprot.readListEnd() else: iprot.skip(ftype) @@ -26900,8 +27134,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1100 in self.part_vals: - oprot.writeString(iter1100) + for iter1109 in self.part_vals: + oprot.writeString(iter1109) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -26911,8 +27145,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1101 in self.group_names: - oprot.writeString(iter1101) + for iter1110 in self.group_names: + oprot.writeString(iter1110) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27341,11 +27575,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1105, _size1102) = iprot.readListBegin() - for _i1106 in xrange(_size1102): - _elem1107 = Partition() - _elem1107.read(iprot) - self.success.append(_elem1107) + (_etype1114, _size1111) = iprot.readListBegin() + for _i1115 in xrange(_size1111): + _elem1116 = Partition() + _elem1116.read(iprot) + self.success.append(_elem1116) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -27374,8 +27608,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1108 in self.success: - iter1108.write(oprot) + for iter1117 in self.success: + iter1117.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27469,10 +27703,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1112, _size1109) = iprot.readListBegin() - for _i1113 in xrange(_size1109): - _elem1114 = iprot.readString() - self.group_names.append(_elem1114) + (_etype1121, _size1118) = iprot.readListBegin() + for _i1122 in xrange(_size1118): + _elem1123 = iprot.readString() + self.group_names.append(_elem1123) iprot.readListEnd() else: iprot.skip(ftype) @@ -27505,8 +27739,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1115 in self.group_names: - oprot.writeString(iter1115) + for iter1124 in self.group_names: + oprot.writeString(iter1124) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27567,11 +27801,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1119, _size1116) = iprot.readListBegin() - for _i1120 in xrange(_size1116): - _elem1121 = Partition() - _elem1121.read(iprot) - self.success.append(_elem1121) + (_etype1128, _size1125) = iprot.readListBegin() + for _i1129 in xrange(_size1125): + _elem1130 = Partition() + _elem1130.read(iprot) + self.success.append(_elem1130) iprot.readListEnd() else: iprot.skip(ftype) @@ -27600,8 +27834,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1122 in self.success: - iter1122.write(oprot) + for iter1131 in self.success: + 
iter1131.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27759,11 +27993,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1126, _size1123) = iprot.readListBegin() - for _i1127 in xrange(_size1123): - _elem1128 = PartitionSpec() - _elem1128.read(iprot) - self.success.append(_elem1128) + (_etype1135, _size1132) = iprot.readListBegin() + for _i1136 in xrange(_size1132): + _elem1137 = PartitionSpec() + _elem1137.read(iprot) + self.success.append(_elem1137) iprot.readListEnd() else: iprot.skip(ftype) @@ -27792,8 +28026,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1129 in self.success: - iter1129.write(oprot) + for iter1138 in self.success: + iter1138.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27951,10 +28185,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1133, _size1130) = iprot.readListBegin() - for _i1134 in xrange(_size1130): - _elem1135 = iprot.readString() - self.success.append(_elem1135) + (_etype1142, _size1139) = iprot.readListBegin() + for _i1143 in xrange(_size1139): + _elem1144 = iprot.readString() + self.success.append(_elem1144) iprot.readListEnd() else: iprot.skip(ftype) @@ -27983,8 +28217,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1136 in self.success: - oprot.writeString(iter1136) + for iter1145 in self.success: + oprot.writeString(iter1145) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28224,10 +28458,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1140, _size1137) = iprot.readListBegin() - for _i1141 in xrange(_size1137): - _elem1142 = iprot.readString() - 
self.part_vals.append(_elem1142) + (_etype1149, _size1146) = iprot.readListBegin() + for _i1150 in xrange(_size1146): + _elem1151 = iprot.readString() + self.part_vals.append(_elem1151) iprot.readListEnd() else: iprot.skip(ftype) @@ -28257,8 +28491,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1143 in self.part_vals: - oprot.writeString(iter1143) + for iter1152 in self.part_vals: + oprot.writeString(iter1152) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28322,11 +28556,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1147, _size1144) = iprot.readListBegin() - for _i1148 in xrange(_size1144): - _elem1149 = Partition() - _elem1149.read(iprot) - self.success.append(_elem1149) + (_etype1156, _size1153) = iprot.readListBegin() + for _i1157 in xrange(_size1153): + _elem1158 = Partition() + _elem1158.read(iprot) + self.success.append(_elem1158) iprot.readListEnd() else: iprot.skip(ftype) @@ -28355,8 +28589,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1150 in self.success: - iter1150.write(oprot) + for iter1159 in self.success: + iter1159.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28443,10 +28677,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1154, _size1151) = iprot.readListBegin() - for _i1155 in xrange(_size1151): - _elem1156 = iprot.readString() - self.part_vals.append(_elem1156) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = iprot.readString() + self.part_vals.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -28463,10 +28697,10 @@ def read(self, iprot): elif fid == 6: if ftype == 
TType.LIST: self.group_names = [] - (_etype1160, _size1157) = iprot.readListBegin() - for _i1161 in xrange(_size1157): - _elem1162 = iprot.readString() - self.group_names.append(_elem1162) + (_etype1169, _size1166) = iprot.readListBegin() + for _i1170 in xrange(_size1166): + _elem1171 = iprot.readString() + self.group_names.append(_elem1171) iprot.readListEnd() else: iprot.skip(ftype) @@ -28491,8 +28725,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1163 in self.part_vals: - oprot.writeString(iter1163) + for iter1172 in self.part_vals: + oprot.writeString(iter1172) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28506,8 +28740,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1164 in self.group_names: - oprot.writeString(iter1164) + for iter1173 in self.group_names: + oprot.writeString(iter1173) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28569,11 +28803,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1168, _size1165) = iprot.readListBegin() - for _i1169 in xrange(_size1165): - _elem1170 = Partition() - _elem1170.read(iprot) - self.success.append(_elem1170) + (_etype1177, _size1174) = iprot.readListBegin() + for _i1178 in xrange(_size1174): + _elem1179 = Partition() + _elem1179.read(iprot) + self.success.append(_elem1179) iprot.readListEnd() else: iprot.skip(ftype) @@ -28602,8 +28836,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1171 in self.success: - iter1171.write(oprot) + for iter1180 in self.success: + iter1180.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not 
None: @@ -28684,10 +28918,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1175, _size1172) = iprot.readListBegin() - for _i1176 in xrange(_size1172): - _elem1177 = iprot.readString() - self.part_vals.append(_elem1177) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = iprot.readString() + self.part_vals.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -28717,8 +28951,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1178 in self.part_vals: - oprot.writeString(iter1178) + for iter1187 in self.part_vals: + oprot.writeString(iter1187) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28782,10 +29016,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1182, _size1179) = iprot.readListBegin() - for _i1183 in xrange(_size1179): - _elem1184 = iprot.readString() - self.success.append(_elem1184) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = iprot.readString() + self.success.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) @@ -28814,8 +29048,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1185 in self.success: - oprot.writeString(iter1185) + for iter1194 in self.success: + oprot.writeString(iter1194) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28986,11 +29220,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1189, _size1186) = iprot.readListBegin() - for _i1190 in xrange(_size1186): - _elem1191 = Partition() - _elem1191.read(iprot) - self.success.append(_elem1191) + (_etype1198, _size1195) = iprot.readListBegin() + for 
_i1199 in xrange(_size1195): + _elem1200 = Partition() + _elem1200.read(iprot) + self.success.append(_elem1200) iprot.readListEnd() else: iprot.skip(ftype) @@ -29019,8 +29253,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1192 in self.success: - iter1192.write(oprot) + for iter1201 in self.success: + iter1201.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29191,11 +29425,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1196, _size1193) = iprot.readListBegin() - for _i1197 in xrange(_size1193): - _elem1198 = PartitionSpec() - _elem1198.read(iprot) - self.success.append(_elem1198) + (_etype1205, _size1202) = iprot.readListBegin() + for _i1206 in xrange(_size1202): + _elem1207 = PartitionSpec() + _elem1207.read(iprot) + self.success.append(_elem1207) iprot.readListEnd() else: iprot.skip(ftype) @@ -29224,8 +29458,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1199 in self.success: - iter1199.write(oprot) + for iter1208 in self.success: + iter1208.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29645,10 +29879,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1203, _size1200) = iprot.readListBegin() - for _i1204 in xrange(_size1200): - _elem1205 = iprot.readString() - self.names.append(_elem1205) + (_etype1212, _size1209) = iprot.readListBegin() + for _i1213 in xrange(_size1209): + _elem1214 = iprot.readString() + self.names.append(_elem1214) iprot.readListEnd() else: iprot.skip(ftype) @@ -29673,8 +29907,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1206 
in self.names: - oprot.writeString(iter1206) + for iter1215 in self.names: + oprot.writeString(iter1215) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29733,11 +29967,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1210, _size1207) = iprot.readListBegin() - for _i1211 in xrange(_size1207): - _elem1212 = Partition() - _elem1212.read(iprot) - self.success.append(_elem1212) + (_etype1219, _size1216) = iprot.readListBegin() + for _i1220 in xrange(_size1216): + _elem1221 = Partition() + _elem1221.read(iprot) + self.success.append(_elem1221) iprot.readListEnd() else: iprot.skip(ftype) @@ -29766,8 +30000,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1213 in self.success: - iter1213.write(oprot) + for iter1222 in self.success: + iter1222.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30017,11 +30251,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1217, _size1214) = iprot.readListBegin() - for _i1218 in xrange(_size1214): - _elem1219 = Partition() - _elem1219.read(iprot) - self.new_parts.append(_elem1219) + (_etype1226, _size1223) = iprot.readListBegin() + for _i1227 in xrange(_size1223): + _elem1228 = Partition() + _elem1228.read(iprot) + self.new_parts.append(_elem1228) iprot.readListEnd() else: iprot.skip(ftype) @@ -30046,8 +30280,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1220 in self.new_parts: - iter1220.write(oprot) + for iter1229 in self.new_parts: + iter1229.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30200,11 +30434,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1224, 
_size1221) = iprot.readListBegin() - for _i1225 in xrange(_size1221): - _elem1226 = Partition() - _elem1226.read(iprot) - self.new_parts.append(_elem1226) + (_etype1233, _size1230) = iprot.readListBegin() + for _i1234 in xrange(_size1230): + _elem1235 = Partition() + _elem1235.read(iprot) + self.new_parts.append(_elem1235) iprot.readListEnd() else: iprot.skip(ftype) @@ -30235,8 +30469,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1227 in self.new_parts: - iter1227.write(oprot) + for iter1236 in self.new_parts: + iter1236.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -30739,10 +30973,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1231, _size1228) = iprot.readListBegin() - for _i1232 in xrange(_size1228): - _elem1233 = iprot.readString() - self.part_vals.append(_elem1233) + (_etype1240, _size1237) = iprot.readListBegin() + for _i1241 in xrange(_size1237): + _elem1242 = iprot.readString() + self.part_vals.append(_elem1242) iprot.readListEnd() else: iprot.skip(ftype) @@ -30773,8 +31007,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1234 in self.part_vals: - oprot.writeString(iter1234) + for iter1243 in self.part_vals: + oprot.writeString(iter1243) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -31075,10 +31309,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1238, _size1235) = iprot.readListBegin() - for _i1239 in xrange(_size1235): - _elem1240 = iprot.readString() - self.part_vals.append(_elem1240) + (_etype1247, _size1244) = iprot.readListBegin() + for _i1248 in xrange(_size1244): + _elem1249 = iprot.readString() + 
self.part_vals.append(_elem1249) iprot.readListEnd() else: iprot.skip(ftype) @@ -31100,8 +31334,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1241 in self.part_vals: - oprot.writeString(iter1241) + for iter1250 in self.part_vals: + oprot.writeString(iter1250) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -31459,10 +31693,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1245, _size1242) = iprot.readListBegin() - for _i1246 in xrange(_size1242): - _elem1247 = iprot.readString() - self.success.append(_elem1247) + (_etype1254, _size1251) = iprot.readListBegin() + for _i1255 in xrange(_size1251): + _elem1256 = iprot.readString() + self.success.append(_elem1256) iprot.readListEnd() else: iprot.skip(ftype) @@ -31485,8 +31719,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1248 in self.success: - oprot.writeString(iter1248) + for iter1257 in self.success: + oprot.writeString(iter1257) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31610,11 +31844,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1250, _vtype1251, _size1249 ) = iprot.readMapBegin() - for _i1253 in xrange(_size1249): - _key1254 = iprot.readString() - _val1255 = iprot.readString() - self.success[_key1254] = _val1255 + (_ktype1259, _vtype1260, _size1258 ) = iprot.readMapBegin() + for _i1262 in xrange(_size1258): + _key1263 = iprot.readString() + _val1264 = iprot.readString() + self.success[_key1263] = _val1264 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31637,9 +31871,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, 
TType.STRING, len(self.success)) - for kiter1256,viter1257 in self.success.items(): - oprot.writeString(kiter1256) - oprot.writeString(viter1257) + for kiter1265,viter1266 in self.success.items(): + oprot.writeString(kiter1265) + oprot.writeString(viter1266) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31715,11 +31949,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1259, _vtype1260, _size1258 ) = iprot.readMapBegin() - for _i1262 in xrange(_size1258): - _key1263 = iprot.readString() - _val1264 = iprot.readString() - self.part_vals[_key1263] = _val1264 + (_ktype1268, _vtype1269, _size1267 ) = iprot.readMapBegin() + for _i1271 in xrange(_size1267): + _key1272 = iprot.readString() + _val1273 = iprot.readString() + self.part_vals[_key1272] = _val1273 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31749,9 +31983,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1265,viter1266 in self.part_vals.items(): - oprot.writeString(kiter1265) - oprot.writeString(viter1266) + for kiter1274,viter1275 in self.part_vals.items(): + oprot.writeString(kiter1274) + oprot.writeString(viter1275) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -31965,11 +32199,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1268, _vtype1269, _size1267 ) = iprot.readMapBegin() - for _i1271 in xrange(_size1267): - _key1272 = iprot.readString() - _val1273 = iprot.readString() - self.part_vals[_key1272] = _val1273 + (_ktype1277, _vtype1278, _size1276 ) = iprot.readMapBegin() + for _i1280 in xrange(_size1276): + _key1281 = iprot.readString() + _val1282 = iprot.readString() + self.part_vals[_key1281] = _val1282 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31999,9 +32233,9 @@ def write(self, oprot): if self.part_vals is not None: 
oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1274,viter1275 in self.part_vals.items(): - oprot.writeString(kiter1274) - oprot.writeString(viter1275) + for kiter1283,viter1284 in self.part_vals.items(): + oprot.writeString(kiter1283) + oprot.writeString(viter1284) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -33882,25 +34116,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_table_column_statistics_args: +class update_table_basic_statistics_req_args: """ Attributes: - - db_name - - tbl_name - - col_name + - req """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 - (3, TType.STRING, 'col_name', None, None, ), # 3 + (1, TType.STRUCT, 'req', (SetBasicStatsRequest, SetBasicStatsRequest.thrift_spec), None, ), # 1 ) - def __init__(self, db_name=None, tbl_name=None, col_name=None,): - self.db_name = db_name - self.tbl_name = tbl_name - self.col_name = col_name + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -33912,18 +34140,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.db_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.tbl_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRING: - self.col_name = iprot.readString() + if ftype == TType.STRUCT: + self.req = SetBasicStatsRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -33935,18 +34154,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None 
and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_table_column_statistics_args') - if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) - oprot.writeString(self.db_name) - oprot.writeFieldEnd() - if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) - oprot.writeString(self.tbl_name) - oprot.writeFieldEnd() - if self.col_name is not None: - oprot.writeFieldBegin('col_name', TType.STRING, 3) - oprot.writeString(self.col_name) + oprot.writeStructBegin('update_table_basic_statistics_req_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33957,9 +34168,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.db_name) - value = (value * 31) ^ hash(self.tbl_name) - value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -33973,7 +34182,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_table_column_statistics_result: +class update_table_basic_statistics_req_result: """ Attributes: - success @@ -33984,11 +34193,11 @@ class get_table_column_statistics_result: """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (SetBasicStatsResponse, SetBasicStatsResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 3 - (4, TType.STRUCT, 'o4', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 4 + (2, 
TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4 ) def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): @@ -34009,7 +34218,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ColumnStatistics() + self.success = SetBasicStatsResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -34021,19 +34230,19 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: - self.o3 = InvalidInputException() + self.o3 = MetaException() self.o3.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: - self.o4 = InvalidObjectException() + self.o4 = InvalidInputException() self.o4.read(iprot) else: iprot.skip(ftype) @@ -34046,7 +34255,593 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_table_column_statistics_result') + oprot.writeStructBegin('update_table_basic_statistics_req_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if 
self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class update_partition_basic_statistics_req_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (SetBasicStatsRequest, SetBasicStatsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = SetBasicStatsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('update_partition_basic_statistics_req_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class update_partition_basic_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (SetBasicStatsResponse, SetBasicStatsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SetBasicStatsResponse() + 
self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException() + self.o4.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('update_partition_basic_statistics_req_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in 
self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class invalidate_all_column_statistics_req_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (InvalidateColumnStatsRequest, InvalidateColumnStatsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = InvalidateColumnStatsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('invalidate_all_column_statistics_req_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + 
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class invalidate_all_column_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (InvalidateColumnStatsResponse, InvalidateColumnStatsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = InvalidateColumnStatsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException() + 
self.o4.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('invalidate_all_column_statistics_req_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + value = (value * 31) ^ hash(self.o4) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_table_column_statistics_args: + """ + Attributes: + - db_name + - tbl_name + - col_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRING, 'col_name', 
None, None, ), # 3 + ) + + def __init__(self, db_name=None, tbl_name=None, col_name=None,): + self.db_name = db_name + self.tbl_name = tbl_name + self.col_name = col_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.col_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_table_column_statistics_args') + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.col_name is not None: + oprot.writeFieldBegin('col_name', TType.STRING, 3) + oprot.writeString(self.col_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.db_name) + value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.col_name) + 
return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_table_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 4 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ColumnStatistics() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = 
InvalidInputException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidObjectException() + self.o4.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_table_column_statistics_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -36027,10 +36822,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1279, _size1276) = iprot.readListBegin() - for _i1280 in xrange(_size1276): - _elem1281 = iprot.readString() - self.success.append(_elem1281) + (_etype1288, _size1285) = iprot.readListBegin() + for _i1289 in xrange(_size1285): + _elem1290 = iprot.readString() + self.success.append(_elem1290) iprot.readListEnd() else: iprot.skip(ftype) @@ -36053,8 +36848,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1282 in self.success: - oprot.writeString(iter1282) + for iter1291 in self.success: + oprot.writeString(iter1291) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36742,10 +37537,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1286, _size1283) = iprot.readListBegin() - for _i1287 in xrange(_size1283): - _elem1288 = iprot.readString() - self.success.append(_elem1288) + (_etype1295, _size1292) = iprot.readListBegin() + for _i1296 in xrange(_size1292): + _elem1297 = iprot.readString() + self.success.append(_elem1297) iprot.readListEnd() else: iprot.skip(ftype) @@ -36768,8 +37563,8 @@ def 
write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1289 in self.success: - oprot.writeString(iter1289) + for iter1298 in self.success: + oprot.writeString(iter1298) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37283,11 +38078,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1293, _size1290) = iprot.readListBegin() - for _i1294 in xrange(_size1290): - _elem1295 = Role() - _elem1295.read(iprot) - self.success.append(_elem1295) + (_etype1302, _size1299) = iprot.readListBegin() + for _i1303 in xrange(_size1299): + _elem1304 = Role() + _elem1304.read(iprot) + self.success.append(_elem1304) iprot.readListEnd() else: iprot.skip(ftype) @@ -37310,8 +38105,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1296 in self.success: - iter1296.write(oprot) + for iter1305 in self.success: + iter1305.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37820,10 +38615,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1300, _size1297) = iprot.readListBegin() - for _i1301 in xrange(_size1297): - _elem1302 = iprot.readString() - self.group_names.append(_elem1302) + (_etype1309, _size1306) = iprot.readListBegin() + for _i1310 in xrange(_size1306): + _elem1311 = iprot.readString() + self.group_names.append(_elem1311) iprot.readListEnd() else: iprot.skip(ftype) @@ -37848,8 +38643,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1303 in self.group_names: - oprot.writeString(iter1303) + for iter1312 in self.group_names: + oprot.writeString(iter1312) oprot.writeListEnd() 
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38076,11 +38871,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1307, _size1304) = iprot.readListBegin() - for _i1308 in xrange(_size1304): - _elem1309 = HiveObjectPrivilege() - _elem1309.read(iprot) - self.success.append(_elem1309) + (_etype1316, _size1313) = iprot.readListBegin() + for _i1317 in xrange(_size1313): + _elem1318 = HiveObjectPrivilege() + _elem1318.read(iprot) + self.success.append(_elem1318) iprot.readListEnd() else: iprot.skip(ftype) @@ -38103,8 +38898,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1310 in self.success: - iter1310.write(oprot) + for iter1319 in self.success: + iter1319.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38774,10 +39569,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1314, _size1311) = iprot.readListBegin() - for _i1315 in xrange(_size1311): - _elem1316 = iprot.readString() - self.group_names.append(_elem1316) + (_etype1323, _size1320) = iprot.readListBegin() + for _i1324 in xrange(_size1320): + _elem1325 = iprot.readString() + self.group_names.append(_elem1325) iprot.readListEnd() else: iprot.skip(ftype) @@ -38798,8 +39593,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1317 in self.group_names: - oprot.writeString(iter1317) + for iter1326 in self.group_names: + oprot.writeString(iter1326) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38854,10 +39649,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1321, _size1318) = iprot.readListBegin() - for _i1322 in xrange(_size1318): - _elem1323 = iprot.readString() - 
self.success.append(_elem1323) + (_etype1330, _size1327) = iprot.readListBegin() + for _i1331 in xrange(_size1327): + _elem1332 = iprot.readString() + self.success.append(_elem1332) iprot.readListEnd() else: iprot.skip(ftype) @@ -38880,8 +39675,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1324 in self.success: - oprot.writeString(iter1324) + for iter1333 in self.success: + oprot.writeString(iter1333) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39813,10 +40608,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1328, _size1325) = iprot.readListBegin() - for _i1329 in xrange(_size1325): - _elem1330 = iprot.readString() - self.success.append(_elem1330) + (_etype1337, _size1334) = iprot.readListBegin() + for _i1338 in xrange(_size1334): + _elem1339 = iprot.readString() + self.success.append(_elem1339) iprot.readListEnd() else: iprot.skip(ftype) @@ -39833,8 +40628,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1331 in self.success: - oprot.writeString(iter1331) + for iter1340 in self.success: + oprot.writeString(iter1340) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -40361,10 +41156,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1335, _size1332) = iprot.readListBegin() - for _i1336 in xrange(_size1332): - _elem1337 = iprot.readString() - self.success.append(_elem1337) + (_etype1344, _size1341) = iprot.readListBegin() + for _i1345 in xrange(_size1341): + _elem1346 = iprot.readString() + self.success.append(_elem1346) iprot.readListEnd() else: iprot.skip(ftype) @@ -40381,8 +41176,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1338 in self.success: - oprot.writeString(iter1338) + for iter1347 in self.success: + oprot.writeString(iter1347) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -48789,11 +49584,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1342, _size1339) = iprot.readListBegin() - for _i1343 in xrange(_size1339): - _elem1344 = SchemaVersion() - _elem1344.read(iprot) - self.success.append(_elem1344) + (_etype1351, _size1348) = iprot.readListBegin() + for _i1352 in xrange(_size1348): + _elem1353 = SchemaVersion() + _elem1353.read(iprot) + self.success.append(_elem1353) iprot.readListEnd() else: iprot.skip(ftype) @@ -48822,8 +49617,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1345 in self.success: - iter1345.write(oprot) + for iter1354 in self.success: + iter1354.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -50298,11 +51093,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1349, _size1346) = iprot.readListBegin() - for _i1350 in xrange(_size1346): - _elem1351 = RuntimeStat() - _elem1351.read(iprot) - self.success.append(_elem1351) + (_etype1358, _size1355) = iprot.readListBegin() + for _i1359 in xrange(_size1355): + _elem1360 = RuntimeStat() + _elem1360.read(iprot) + self.success.append(_elem1360) iprot.readListEnd() else: iprot.skip(ftype) @@ -50325,8 +51120,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1352 in self.success: - iter1352.write(oprot) + for iter1361 in self.success: + iter1361.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 7fc1e43de0..f512b7f1bf 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -6861,6 +6861,8 @@ class ColumnStatisticsObj: - colName - colType - statsData + - isStatsCompliant + - writeId """ thrift_spec = ( @@ -6868,12 +6870,16 @@ class ColumnStatisticsObj: (1, TType.STRING, 'colName', None, None, ), # 1 (2, TType.STRING, 'colType', None, None, ), # 2 (3, TType.STRUCT, 'statsData', (ColumnStatisticsData, ColumnStatisticsData.thrift_spec), None, ), # 3 + (4, TType.BOOL, 'isStatsCompliant', None, None, ), # 4 + (5, TType.I64, 'writeId', None, None, ), # 5 ) - def __init__(self, colName=None, colType=None, statsData=None,): + def __init__(self, colName=None, colType=None, statsData=None, isStatsCompliant=None, writeId=None,): self.colName = colName self.colType = colType self.statsData = statsData + self.isStatsCompliant = isStatsCompliant + self.writeId = writeId def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6900,6 +6906,16 @@ def read(self, iprot): self.statsData.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6922,6 +6938,14 @@ def write(self, oprot): oprot.writeFieldBegin('statsData', TType.STRUCT, 3) self.statsData.write(oprot) oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 4) 
+ oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 5) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6940,6 +6964,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.colName) value = (value * 31) ^ hash(self.colType) value = (value * 31) ^ hash(self.statsData) + value = (value * 31) ^ hash(self.isStatsCompliant) + value = (value * 31) ^ hash(self.writeId) return value def __repr__(self): @@ -7094,20 +7120,17 @@ class ColumnStatistics: Attributes: - statsDesc - statsObj - - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1 (2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2 - (3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3 ) - def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None,): + def __init__(self, statsDesc=None, statsObj=None,): self.statsDesc = statsDesc self.statsObj = statsObj - self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7135,11 +7158,6 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.BOOL: - self.isStatsCompliant = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7161,10 +7179,6 @@ def write(self, oprot): iter250.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() - if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 3) - oprot.writeBool(self.isStatsCompliant) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7180,7 +7194,6 @@ def __hash__(self): 
value = 17 value = (value * 31) ^ hash(self.statsDesc) value = (value * 31) ^ hash(self.statsObj) - value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -7199,20 +7212,17 @@ class AggrStats: Attributes: - colStats - partsFound - - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 (2, TType.I64, 'partsFound', None, None, ), # 2 - (3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3 ) - def __init__(self, colStats=None, partsFound=None, isStatsCompliant=None,): + def __init__(self, colStats=None, partsFound=None,): self.colStats = colStats self.partsFound = partsFound - self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7239,11 +7249,6 @@ def read(self, iprot): self.partsFound = iprot.readI64() else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.BOOL: - self.isStatsCompliant = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7265,10 +7270,6 @@ def write(self, oprot): oprot.writeFieldBegin('partsFound', TType.I64, 2) oprot.writeI64(self.partsFound) oprot.writeFieldEnd() - if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 3) - oprot.writeBool(self.isStatsCompliant) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7284,7 +7285,6 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.colStats) value = (value * 31) ^ hash(self.partsFound) - value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -7480,6 +7480,393 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class SetBasicStatsRequest: + """ + Attributes: + - desc + - isValid + - 
legacyStats + - writeId + - validWriteIdList + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'desc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1 + (2, TType.BOOL, 'isValid', None, None, ), # 2 + (3, TType.MAP, 'legacyStats', (TType.STRING,None,TType.STRING,None), None, ), # 3 + (4, TType.I64, 'writeId', None, -1, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 + ) + + def __init__(self, desc=None, isValid=None, legacyStats=None, writeId=thrift_spec[4][4], validWriteIdList=None,): + self.desc = desc + self.isValid = isValid + self.legacyStats = legacyStats + self.writeId = writeId + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.desc = ColumnStatisticsDesc() + self.desc.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isValid = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.legacyStats = {} + (_ktype266, _vtype267, _size265 ) = iprot.readMapBegin() + for _i269 in xrange(_size265): + _key270 = iprot.readString() + _val271 = iprot.readString() + self.legacyStats[_key270] = _val271 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ 
== TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SetBasicStatsRequest') + if self.desc is not None: + oprot.writeFieldBegin('desc', TType.STRUCT, 1) + self.desc.write(oprot) + oprot.writeFieldEnd() + if self.isValid is not None: + oprot.writeFieldBegin('isValid', TType.BOOL, 2) + oprot.writeBool(self.isValid) + oprot.writeFieldEnd() + if self.legacyStats is not None: + oprot.writeFieldBegin('legacyStats', TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.legacyStats)) + for kiter272,viter273 in self.legacyStats.items(): + oprot.writeString(kiter272) + oprot.writeString(viter273) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 4) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.desc is None: + raise TProtocol.TProtocolException(message='Required field desc is unset!') + if self.isValid is None: + raise TProtocol.TProtocolException(message='Required field isValid is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.desc) + value = (value * 31) ^ hash(self.isValid) + value = (value * 31) ^ hash(self.legacyStats) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.validWriteIdList) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + +class SetBasicStatsResponse: + """ + Attributes: + - result + """ + + thrift_spec = ( + None, # 0 + (1, TType.BOOL, 'result', None, None, ), # 1 + ) + + def __init__(self, result=None,): + self.result = result + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.result = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SetBasicStatsResponse') + if self.result is not None: + oprot.writeFieldBegin('result', TType.BOOL, 1) + oprot.writeBool(self.result) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.result is None: + raise TProtocol.TProtocolException(message='Required field result is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.result) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class InvalidateColumnStatsRequest: + """ + Attributes: + - catName + - dbName + 
- tableName + - partName + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'tableName', None, None, ), # 3 + (4, TType.STRING, 'partName', None, None, ), # 4 + (5, TType.I64, 'writeId', None, -1, ), # 5 + ) + + def __init__(self, catName=None, dbName=None, tableName=None, partName=None, writeId=thrift_spec[5][4],): + self.catName = catName + self.dbName = dbName + self.tableName = tableName + self.partName = partName + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.partName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('InvalidateColumnStatsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + 
oprot.writeString(self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 2) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 3) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + if self.partName is not None: + oprot.writeFieldBegin('partName', TType.STRING, 4) + oprot.writeString(self.partName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 5) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + value = (value * 31) ^ hash(self.partName) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class InvalidateColumnStatsResponse: + """ + Attributes: + - result + """ + + thrift_spec = ( + None, # 0 + (1, TType.BOOL, 'result', None, None, ), # 1 + ) + + def __init__(self, result=None,): + self.result = result + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, 
iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.result = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('InvalidateColumnStatsResponse') + if self.result is not None: + oprot.writeFieldBegin('result', TType.BOOL, 1) + oprot.writeBool(self.result) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.result is None: + raise TProtocol.TProtocolException(message='Required field result is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.result) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Schema: """ Attributes: @@ -7509,22 +7896,22 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype268, _size265) = iprot.readListBegin() - for _i269 in xrange(_size265): - _elem270 = FieldSchema() - _elem270.read(iprot) - self.fieldSchemas.append(_elem270) + (_etype277, _size274) = iprot.readListBegin() + for _i278 in xrange(_size274): + _elem279 = FieldSchema() + _elem279.read(iprot) + self.fieldSchemas.append(_elem279) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - 
(_ktype272, _vtype273, _size271 ) = iprot.readMapBegin() - for _i275 in xrange(_size271): - _key276 = iprot.readString() - _val277 = iprot.readString() - self.properties[_key276] = _val277 + (_ktype281, _vtype282, _size280 ) = iprot.readMapBegin() + for _i284 in xrange(_size280): + _key285 = iprot.readString() + _val286 = iprot.readString() + self.properties[_key285] = _val286 iprot.readMapEnd() else: iprot.skip(ftype) @@ -7541,16 +7928,16 @@ def write(self, oprot): if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter278 in self.fieldSchemas: - iter278.write(oprot) + for iter287 in self.fieldSchemas: + iter287.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter279,viter280 in self.properties.items(): - oprot.writeString(kiter279) - oprot.writeString(viter280) + for kiter288,viter289 in self.properties.items(): + oprot.writeString(kiter288) + oprot.writeString(viter289) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7603,11 +7990,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.properties = {} - (_ktype282, _vtype283, _size281 ) = iprot.readMapBegin() - for _i285 in xrange(_size281): - _key286 = iprot.readString() - _val287 = iprot.readString() - self.properties[_key286] = _val287 + (_ktype291, _vtype292, _size290 ) = iprot.readMapBegin() + for _i294 in xrange(_size290): + _key295 = iprot.readString() + _val296 = iprot.readString() + self.properties[_key295] = _val296 iprot.readMapEnd() else: iprot.skip(ftype) @@ -7624,9 +8011,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter288,viter289 in self.properties.items(): - 
oprot.writeString(kiter288) - oprot.writeString(viter289) + for kiter297,viter298 in self.properties.items(): + oprot.writeString(kiter297) + oprot.writeString(viter298) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7773,11 +8160,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeys = [] - (_etype293, _size290) = iprot.readListBegin() - for _i294 in xrange(_size290): - _elem295 = SQLPrimaryKey() - _elem295.read(iprot) - self.primaryKeys.append(_elem295) + (_etype302, _size299) = iprot.readListBegin() + for _i303 in xrange(_size299): + _elem304 = SQLPrimaryKey() + _elem304.read(iprot) + self.primaryKeys.append(_elem304) iprot.readListEnd() else: iprot.skip(ftype) @@ -7794,8 +8181,8 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter296 in self.primaryKeys: - iter296.write(oprot) + for iter305 in self.primaryKeys: + iter305.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7966,11 +8353,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeys = [] - (_etype300, _size297) = iprot.readListBegin() - for _i301 in xrange(_size297): - _elem302 = SQLForeignKey() - _elem302.read(iprot) - self.foreignKeys.append(_elem302) + (_etype309, _size306) = iprot.readListBegin() + for _i310 in xrange(_size306): + _elem311 = SQLForeignKey() + _elem311.read(iprot) + self.foreignKeys.append(_elem311) iprot.readListEnd() else: iprot.skip(ftype) @@ -7987,8 +8374,8 @@ def write(self, oprot): if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter303 in self.foreignKeys: - iter303.write(oprot) + for iter312 in self.foreignKeys: + iter312.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8139,11 +8526,11 @@ def read(self, iprot): 
if fid == 1: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype307, _size304) = iprot.readListBegin() - for _i308 in xrange(_size304): - _elem309 = SQLUniqueConstraint() - _elem309.read(iprot) - self.uniqueConstraints.append(_elem309) + (_etype316, _size313) = iprot.readListBegin() + for _i317 in xrange(_size313): + _elem318 = SQLUniqueConstraint() + _elem318.read(iprot) + self.uniqueConstraints.append(_elem318) iprot.readListEnd() else: iprot.skip(ftype) @@ -8160,8 +8547,8 @@ def write(self, oprot): if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter310 in self.uniqueConstraints: - iter310.write(oprot) + for iter319 in self.uniqueConstraints: + iter319.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8312,11 +8699,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype314, _size311) = iprot.readListBegin() - for _i315 in xrange(_size311): - _elem316 = SQLNotNullConstraint() - _elem316.read(iprot) - self.notNullConstraints.append(_elem316) + (_etype323, _size320) = iprot.readListBegin() + for _i324 in xrange(_size320): + _elem325 = SQLNotNullConstraint() + _elem325.read(iprot) + self.notNullConstraints.append(_elem325) iprot.readListEnd() else: iprot.skip(ftype) @@ -8333,8 +8720,8 @@ def write(self, oprot): if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter317 in self.notNullConstraints: - iter317.write(oprot) + for iter326 in self.notNullConstraints: + iter326.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8485,11 +8872,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype321, _size318) = iprot.readListBegin() - for _i322 in xrange(_size318): - 
_elem323 = SQLDefaultConstraint() - _elem323.read(iprot) - self.defaultConstraints.append(_elem323) + (_etype330, _size327) = iprot.readListBegin() + for _i331 in xrange(_size327): + _elem332 = SQLDefaultConstraint() + _elem332.read(iprot) + self.defaultConstraints.append(_elem332) iprot.readListEnd() else: iprot.skip(ftype) @@ -8506,8 +8893,8 @@ def write(self, oprot): if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter324 in self.defaultConstraints: - iter324.write(oprot) + for iter333 in self.defaultConstraints: + iter333.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8658,11 +9045,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraints = [] - (_etype328, _size325) = iprot.readListBegin() - for _i329 in xrange(_size325): - _elem330 = SQLCheckConstraint() - _elem330.read(iprot) - self.checkConstraints.append(_elem330) + (_etype337, _size334) = iprot.readListBegin() + for _i338 in xrange(_size334): + _elem339 = SQLCheckConstraint() + _elem339.read(iprot) + self.checkConstraints.append(_elem339) iprot.readListEnd() else: iprot.skip(ftype) @@ -8679,8 +9066,8 @@ def write(self, oprot): if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter331 in self.checkConstraints: - iter331.write(oprot) + for iter340 in self.checkConstraints: + iter340.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8844,11 +9231,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeyCols = [] - (_etype335, _size332) = iprot.readListBegin() - for _i336 in xrange(_size332): - _elem337 = SQLPrimaryKey() - _elem337.read(iprot) - self.primaryKeyCols.append(_elem337) + (_etype344, _size341) = iprot.readListBegin() + for _i345 in 
xrange(_size341): + _elem346 = SQLPrimaryKey() + _elem346.read(iprot) + self.primaryKeyCols.append(_elem346) iprot.readListEnd() else: iprot.skip(ftype) @@ -8865,8 +9252,8 @@ def write(self, oprot): if self.primaryKeyCols is not None: oprot.writeFieldBegin('primaryKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeyCols)) - for iter338 in self.primaryKeyCols: - iter338.write(oprot) + for iter347 in self.primaryKeyCols: + iter347.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8920,11 +9307,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeyCols = [] - (_etype342, _size339) = iprot.readListBegin() - for _i343 in xrange(_size339): - _elem344 = SQLForeignKey() - _elem344.read(iprot) - self.foreignKeyCols.append(_elem344) + (_etype351, _size348) = iprot.readListBegin() + for _i352 in xrange(_size348): + _elem353 = SQLForeignKey() + _elem353.read(iprot) + self.foreignKeyCols.append(_elem353) iprot.readListEnd() else: iprot.skip(ftype) @@ -8941,8 +9328,8 @@ def write(self, oprot): if self.foreignKeyCols is not None: oprot.writeFieldBegin('foreignKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) - for iter345 in self.foreignKeyCols: - iter345.write(oprot) + for iter354 in self.foreignKeyCols: + iter354.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8996,11 +9383,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.uniqueConstraintCols = [] - (_etype349, _size346) = iprot.readListBegin() - for _i350 in xrange(_size346): - _elem351 = SQLUniqueConstraint() - _elem351.read(iprot) - self.uniqueConstraintCols.append(_elem351) + (_etype358, _size355) = iprot.readListBegin() + for _i359 in xrange(_size355): + _elem360 = SQLUniqueConstraint() + _elem360.read(iprot) + self.uniqueConstraintCols.append(_elem360) iprot.readListEnd() else: iprot.skip(ftype) @@ -9017,8 +9404,8 @@ def write(self, oprot): if 
self.uniqueConstraintCols is not None: oprot.writeFieldBegin('uniqueConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) - for iter352 in self.uniqueConstraintCols: - iter352.write(oprot) + for iter361 in self.uniqueConstraintCols: + iter361.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9072,11 +9459,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraintCols = [] - (_etype356, _size353) = iprot.readListBegin() - for _i357 in xrange(_size353): - _elem358 = SQLNotNullConstraint() - _elem358.read(iprot) - self.notNullConstraintCols.append(_elem358) + (_etype365, _size362) = iprot.readListBegin() + for _i366 in xrange(_size362): + _elem367 = SQLNotNullConstraint() + _elem367.read(iprot) + self.notNullConstraintCols.append(_elem367) iprot.readListEnd() else: iprot.skip(ftype) @@ -9093,8 +9480,8 @@ def write(self, oprot): if self.notNullConstraintCols is not None: oprot.writeFieldBegin('notNullConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) - for iter359 in self.notNullConstraintCols: - iter359.write(oprot) + for iter368 in self.notNullConstraintCols: + iter368.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9148,11 +9535,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraintCols = [] - (_etype363, _size360) = iprot.readListBegin() - for _i364 in xrange(_size360): - _elem365 = SQLDefaultConstraint() - _elem365.read(iprot) - self.defaultConstraintCols.append(_elem365) + (_etype372, _size369) = iprot.readListBegin() + for _i373 in xrange(_size369): + _elem374 = SQLDefaultConstraint() + _elem374.read(iprot) + self.defaultConstraintCols.append(_elem374) iprot.readListEnd() else: iprot.skip(ftype) @@ -9169,8 +9556,8 @@ def write(self, oprot): if self.defaultConstraintCols is not None: oprot.writeFieldBegin('defaultConstraintCols', TType.LIST, 
1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraintCols)) - for iter366 in self.defaultConstraintCols: - iter366.write(oprot) + for iter375 in self.defaultConstraintCols: + iter375.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9224,11 +9611,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraintCols = [] - (_etype370, _size367) = iprot.readListBegin() - for _i371 in xrange(_size367): - _elem372 = SQLCheckConstraint() - _elem372.read(iprot) - self.checkConstraintCols.append(_elem372) + (_etype379, _size376) = iprot.readListBegin() + for _i380 in xrange(_size376): + _elem381 = SQLCheckConstraint() + _elem381.read(iprot) + self.checkConstraintCols.append(_elem381) iprot.readListEnd() else: iprot.skip(ftype) @@ -9245,8 +9632,8 @@ def write(self, oprot): if self.checkConstraintCols is not None: oprot.writeFieldBegin('checkConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraintCols)) - for iter373 in self.checkConstraintCols: - iter373.write(oprot) + for iter382 in self.checkConstraintCols: + iter382.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9303,11 +9690,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype377, _size374) = iprot.readListBegin() - for _i378 in xrange(_size374): - _elem379 = Partition() - _elem379.read(iprot) - self.partitions.append(_elem379) + (_etype386, _size383) = iprot.readListBegin() + for _i387 in xrange(_size383): + _elem388 = Partition() + _elem388.read(iprot) + self.partitions.append(_elem388) iprot.readListEnd() else: iprot.skip(ftype) @@ -9329,8 +9716,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter380 in self.partitions: - iter380.write(oprot) + for iter389 in self.partitions: + iter389.write(oprot) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -9505,18 +9892,15 @@ class TableStatsResult: """ Attributes: - tableStats - - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1 - (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, tableStats=None, isStatsCompliant=None,): + def __init__(self, tableStats=None,): self.tableStats = tableStats - self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9530,19 +9914,14 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tableStats = [] - (_etype384, _size381) = iprot.readListBegin() - for _i385 in xrange(_size381): - _elem386 = ColumnStatisticsObj() - _elem386.read(iprot) - self.tableStats.append(_elem386) + (_etype393, _size390) = iprot.readListBegin() + for _i394 in xrange(_size390): + _elem395 = ColumnStatisticsObj() + _elem395.read(iprot) + self.tableStats.append(_elem395) iprot.readListEnd() else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.BOOL: - self.isStatsCompliant = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9556,14 +9935,10 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter387 in self.tableStats: - iter387.write(oprot) + for iter396 in self.tableStats: + iter396.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() - if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) - oprot.writeBool(self.isStatsCompliant) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9576,7 +9951,6 
@@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.tableStats) - value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9594,18 +9968,15 @@ class PartitionsStatsResult: """ Attributes: - partStats - - isStatsCompliant """ thrift_spec = ( None, # 0 (1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1 - (2, TType.BOOL, 'isStatsCompliant', None, None, ), # 2 ) - def __init__(self, partStats=None, isStatsCompliant=None,): + def __init__(self, partStats=None,): self.partStats = partStats - self.isStatsCompliant = isStatsCompliant def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9619,25 +9990,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype389, _vtype390, _size388 ) = iprot.readMapBegin() - for _i392 in xrange(_size388): - _key393 = iprot.readString() - _val394 = [] - (_etype398, _size395) = iprot.readListBegin() - for _i399 in xrange(_size395): - _elem400 = ColumnStatisticsObj() - _elem400.read(iprot) - _val394.append(_elem400) + (_ktype398, _vtype399, _size397 ) = iprot.readMapBegin() + for _i401 in xrange(_size397): + _key402 = iprot.readString() + _val403 = [] + (_etype407, _size404) = iprot.readListBegin() + for _i408 in xrange(_size404): + _elem409 = ColumnStatisticsObj() + _elem409.read(iprot) + _val403.append(_elem409) iprot.readListEnd() - self.partStats[_key393] = _val394 + self.partStats[_key402] = _val403 iprot.readMapEnd() else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.BOOL: - self.isStatsCompliant = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9651,18 +10017,14 @@ def write(self, oprot): if self.partStats is not None: 
oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter401,viter402 in self.partStats.items(): - oprot.writeString(kiter401) - oprot.writeListBegin(TType.STRUCT, len(viter402)) - for iter403 in viter402: - iter403.write(oprot) + for kiter410,viter411 in self.partStats.items(): + oprot.writeString(kiter410) + oprot.writeListBegin(TType.STRUCT, len(viter411)) + for iter412 in viter411: + iter412.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() - if self.isStatsCompliant is not None: - oprot.writeFieldBegin('isStatsCompliant', TType.BOOL, 2) - oprot.writeBool(self.isStatsCompliant) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9675,7 +10037,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.partStats) - value = (value * 31) ^ hash(self.isStatsCompliant) return value def __repr__(self): @@ -9737,10 +10098,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype407, _size404) = iprot.readListBegin() - for _i408 in xrange(_size404): - _elem409 = iprot.readString() - self.colNames.append(_elem409) + (_etype416, _size413) = iprot.readListBegin() + for _i417 in xrange(_size413): + _elem418 = iprot.readString() + self.colNames.append(_elem418) iprot.readListEnd() else: iprot.skip(ftype) @@ -9775,8 +10136,8 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter410 in self.colNames: - oprot.writeString(iter410) + for iter419 in self.colNames: + oprot.writeString(iter419) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -9871,20 +10232,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype414, _size411) = iprot.readListBegin() - for _i415 in xrange(_size411): - _elem416 = iprot.readString() - 
self.colNames.append(_elem416) + (_etype423, _size420) = iprot.readListBegin() + for _i424 in xrange(_size420): + _elem425 = iprot.readString() + self.colNames.append(_elem425) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.partNames = [] - (_etype420, _size417) = iprot.readListBegin() - for _i421 in xrange(_size417): - _elem422 = iprot.readString() - self.partNames.append(_elem422) + (_etype429, _size426) = iprot.readListBegin() + for _i430 in xrange(_size426): + _elem431 = iprot.readString() + self.partNames.append(_elem431) iprot.readListEnd() else: iprot.skip(ftype) @@ -9919,15 +10280,15 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter423 in self.colNames: - oprot.writeString(iter423) + for iter432 in self.colNames: + oprot.writeString(iter432) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter424 in self.partNames: - oprot.writeString(iter424) + for iter433 in self.partNames: + oprot.writeString(iter433) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -10003,11 +10364,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype428, _size425) = iprot.readListBegin() - for _i429 in xrange(_size425): - _elem430 = Partition() - _elem430.read(iprot) - self.partitions.append(_elem430) + (_etype437, _size434) = iprot.readListBegin() + for _i438 in xrange(_size434): + _elem439 = Partition() + _elem439.read(iprot) + self.partitions.append(_elem439) iprot.readListEnd() else: iprot.skip(ftype) @@ -10029,8 +10390,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter431 in self.partitions: 
- iter431.write(oprot) + for iter440 in self.partitions: + iter440.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.isStatsCompliant is not None: @@ -10115,11 +10476,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype435, _size432) = iprot.readListBegin() - for _i436 in xrange(_size432): - _elem437 = Partition() - _elem437.read(iprot) - self.parts.append(_elem437) + (_etype444, _size441) = iprot.readListBegin() + for _i445 in xrange(_size441): + _elem446 = Partition() + _elem446.read(iprot) + self.parts.append(_elem446) iprot.readListEnd() else: iprot.skip(ftype) @@ -10164,8 +10525,8 @@ def write(self, oprot): if self.parts is not None: oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter438 in self.parts: - iter438.write(oprot) + for iter447 in self.parts: + iter447.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -10247,11 +10608,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype442, _size439) = iprot.readListBegin() - for _i443 in xrange(_size439): - _elem444 = Partition() - _elem444.read(iprot) - self.partitions.append(_elem444) + (_etype451, _size448) = iprot.readListBegin() + for _i452 in xrange(_size448): + _elem453 = Partition() + _elem453.read(iprot) + self.partitions.append(_elem453) iprot.readListEnd() else: iprot.skip(ftype) @@ -10268,8 +10629,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter445 in self.partitions: - iter445.write(oprot) + for iter454 in self.partitions: + iter454.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10404,21 +10765,21 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype449, _size446) = iprot.readListBegin() - for _i450 in 
xrange(_size446): - _elem451 = iprot.readString() - self.names.append(_elem451) + (_etype458, _size455) = iprot.readListBegin() + for _i459 in xrange(_size455): + _elem460 = iprot.readString() + self.names.append(_elem460) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype455, _size452) = iprot.readListBegin() - for _i456 in xrange(_size452): - _elem457 = DropPartitionsExpr() - _elem457.read(iprot) - self.exprs.append(_elem457) + (_etype464, _size461) = iprot.readListBegin() + for _i465 in xrange(_size461): + _elem466 = DropPartitionsExpr() + _elem466.read(iprot) + self.exprs.append(_elem466) iprot.readListEnd() else: iprot.skip(ftype) @@ -10435,15 +10796,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter458 in self.names: - oprot.writeString(iter458) + for iter467 in self.names: + oprot.writeString(iter467) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter459 in self.exprs: - iter459.write(oprot) + for iter468 in self.exprs: + iter468.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10707,11 +11068,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partitionKeys = [] - (_etype463, _size460) = iprot.readListBegin() - for _i464 in xrange(_size460): - _elem465 = FieldSchema() - _elem465.read(iprot) - self.partitionKeys.append(_elem465) + (_etype472, _size469) = iprot.readListBegin() + for _i473 in xrange(_size469): + _elem474 = FieldSchema() + _elem474.read(iprot) + self.partitionKeys.append(_elem474) iprot.readListEnd() else: iprot.skip(ftype) @@ -10728,11 +11089,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionOrder = [] - (_etype469, _size466) = iprot.readListBegin() - for _i470 
in xrange(_size466): - _elem471 = FieldSchema() - _elem471.read(iprot) - self.partitionOrder.append(_elem471) + (_etype478, _size475) = iprot.readListBegin() + for _i479 in xrange(_size475): + _elem480 = FieldSchema() + _elem480.read(iprot) + self.partitionOrder.append(_elem480) iprot.readListEnd() else: iprot.skip(ftype) @@ -10772,8 +11133,8 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter472 in self.partitionKeys: - iter472.write(oprot) + for iter481 in self.partitionKeys: + iter481.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.applyDistinct is not None: @@ -10787,8 +11148,8 @@ def write(self, oprot): if self.partitionOrder is not None: oprot.writeFieldBegin('partitionOrder', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder)) - for iter473 in self.partitionOrder: - iter473.write(oprot) + for iter482 in self.partitionOrder: + iter482.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ascending is not None: @@ -10866,10 +11227,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.row = [] - (_etype477, _size474) = iprot.readListBegin() - for _i478 in xrange(_size474): - _elem479 = iprot.readString() - self.row.append(_elem479) + (_etype486, _size483) = iprot.readListBegin() + for _i487 in xrange(_size483): + _elem488 = iprot.readString() + self.row.append(_elem488) iprot.readListEnd() else: iprot.skip(ftype) @@ -10886,8 +11247,8 @@ def write(self, oprot): if self.row is not None: oprot.writeFieldBegin('row', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.row)) - for iter480 in self.row: - oprot.writeString(iter480) + for iter489 in self.row: + oprot.writeString(iter489) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10941,11 +11302,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionValues = [] - 
(_etype484, _size481) = iprot.readListBegin() - for _i485 in xrange(_size481): - _elem486 = PartitionValuesRow() - _elem486.read(iprot) - self.partitionValues.append(_elem486) + (_etype493, _size490) = iprot.readListBegin() + for _i494 in xrange(_size490): + _elem495 = PartitionValuesRow() + _elem495.read(iprot) + self.partitionValues.append(_elem495) iprot.readListEnd() else: iprot.skip(ftype) @@ -10962,8 +11323,8 @@ def write(self, oprot): if self.partitionValues is not None: oprot.writeFieldBegin('partitionValues', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionValues)) - for iter487 in self.partitionValues: - iter487.write(oprot) + for iter496 in self.partitionValues: + iter496.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11154,11 +11515,11 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.resourceUris = [] - (_etype491, _size488) = iprot.readListBegin() - for _i492 in xrange(_size488): - _elem493 = ResourceUri() - _elem493.read(iprot) - self.resourceUris.append(_elem493) + (_etype500, _size497) = iprot.readListBegin() + for _i501 in xrange(_size497): + _elem502 = ResourceUri() + _elem502.read(iprot) + self.resourceUris.append(_elem502) iprot.readListEnd() else: iprot.skip(ftype) @@ -11208,8 +11569,8 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter494 in self.resourceUris: - iter494.write(oprot) + for iter503 in self.resourceUris: + iter503.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -11458,11 +11819,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype498, _size495) = iprot.readListBegin() - for _i499 in xrange(_size495): - _elem500 = TxnInfo() - _elem500.read(iprot) - self.open_txns.append(_elem500) + (_etype507, _size504) = iprot.readListBegin() + for _i508 in 
xrange(_size504): + _elem509 = TxnInfo() + _elem509.read(iprot) + self.open_txns.append(_elem509) iprot.readListEnd() else: iprot.skip(ftype) @@ -11483,8 +11844,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter501 in self.open_txns: - iter501.write(oprot) + for iter510 in self.open_txns: + iter510.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11555,10 +11916,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype505, _size502) = iprot.readListBegin() - for _i506 in xrange(_size502): - _elem507 = iprot.readI64() - self.open_txns.append(_elem507) + (_etype514, _size511) = iprot.readListBegin() + for _i515 in xrange(_size511): + _elem516 = iprot.readI64() + self.open_txns.append(_elem516) iprot.readListEnd() else: iprot.skip(ftype) @@ -11589,8 +11950,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.I64, len(self.open_txns)) - for iter508 in self.open_txns: - oprot.writeI64(iter508) + for iter517 in self.open_txns: + oprot.writeI64(iter517) oprot.writeListEnd() oprot.writeFieldEnd() if self.min_open_txn is not None: @@ -11699,10 +12060,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.replSrcTxnIds = [] - (_etype512, _size509) = iprot.readListBegin() - for _i513 in xrange(_size509): - _elem514 = iprot.readI64() - self.replSrcTxnIds.append(_elem514) + (_etype521, _size518) = iprot.readListBegin() + for _i522 in xrange(_size518): + _elem523 = iprot.readI64() + self.replSrcTxnIds.append(_elem523) iprot.readListEnd() else: iprot.skip(ftype) @@ -11739,8 +12100,8 @@ def write(self, oprot): if self.replSrcTxnIds is not None: oprot.writeFieldBegin('replSrcTxnIds', TType.LIST, 6) oprot.writeListBegin(TType.I64, len(self.replSrcTxnIds)) - for iter515 in 
self.replSrcTxnIds: - oprot.writeI64(iter515) + for iter524 in self.replSrcTxnIds: + oprot.writeI64(iter524) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11803,10 +12164,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype519, _size516) = iprot.readListBegin() - for _i520 in xrange(_size516): - _elem521 = iprot.readI64() - self.txn_ids.append(_elem521) + (_etype528, _size525) = iprot.readListBegin() + for _i529 in xrange(_size525): + _elem530 = iprot.readI64() + self.txn_ids.append(_elem530) iprot.readListEnd() else: iprot.skip(ftype) @@ -11823,8 +12184,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter522 in self.txn_ids: - oprot.writeI64(iter522) + for iter531 in self.txn_ids: + oprot.writeI64(iter531) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11958,10 +12319,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype526, _size523) = iprot.readListBegin() - for _i527 in xrange(_size523): - _elem528 = iprot.readI64() - self.txn_ids.append(_elem528) + (_etype535, _size532) = iprot.readListBegin() + for _i536 in xrange(_size532): + _elem537 = iprot.readI64() + self.txn_ids.append(_elem537) iprot.readListEnd() else: iprot.skip(ftype) @@ -11978,8 +12339,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter529 in self.txn_ids: - oprot.writeI64(iter529) + for iter538 in self.txn_ids: + oprot.writeI64(iter538) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12049,11 +12410,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.writeEventInfos = [] - (_etype533, _size530) = iprot.readListBegin() - for _i534 in xrange(_size530): - _elem535 = WriteEventInfo() - _elem535.read(iprot) 
- self.writeEventInfos.append(_elem535) + (_etype542, _size539) = iprot.readListBegin() + for _i543 in xrange(_size539): + _elem544 = WriteEventInfo() + _elem544.read(iprot) + self.writeEventInfos.append(_elem544) iprot.readListEnd() else: iprot.skip(ftype) @@ -12078,8 +12439,8 @@ def write(self, oprot): if self.writeEventInfos is not None: oprot.writeFieldBegin('writeEventInfos', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.writeEventInfos)) - for iter536 in self.writeEventInfos: - iter536.write(oprot) + for iter545 in self.writeEventInfos: + iter545.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12326,10 +12687,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partNames = [] - (_etype540, _size537) = iprot.readListBegin() - for _i541 in xrange(_size537): - _elem542 = iprot.readString() - self.partNames.append(_elem542) + (_etype549, _size546) = iprot.readListBegin() + for _i550 in xrange(_size546): + _elem551 = iprot.readString() + self.partNames.append(_elem551) iprot.readListEnd() else: iprot.skip(ftype) @@ -12366,8 +12727,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter543 in self.partNames: - oprot.writeString(iter543) + for iter552 in self.partNames: + oprot.writeString(iter552) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12437,10 +12798,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fullTableNames = [] - (_etype547, _size544) = iprot.readListBegin() - for _i548 in xrange(_size544): - _elem549 = iprot.readString() - self.fullTableNames.append(_elem549) + (_etype556, _size553) = iprot.readListBegin() + for _i557 in xrange(_size553): + _elem558 = iprot.readString() + self.fullTableNames.append(_elem558) iprot.readListEnd() else: iprot.skip(ftype) @@ -12462,8 +12823,8 @@ def write(self, oprot): if 
self.fullTableNames is not None: oprot.writeFieldBegin('fullTableNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fullTableNames)) - for iter550 in self.fullTableNames: - oprot.writeString(iter550) + for iter559 in self.fullTableNames: + oprot.writeString(iter559) oprot.writeListEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -12546,10 +12907,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.invalidWriteIds = [] - (_etype554, _size551) = iprot.readListBegin() - for _i555 in xrange(_size551): - _elem556 = iprot.readI64() - self.invalidWriteIds.append(_elem556) + (_etype563, _size560) = iprot.readListBegin() + for _i564 in xrange(_size560): + _elem565 = iprot.readI64() + self.invalidWriteIds.append(_elem565) iprot.readListEnd() else: iprot.skip(ftype) @@ -12584,8 +12945,8 @@ def write(self, oprot): if self.invalidWriteIds is not None: oprot.writeFieldBegin('invalidWriteIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.invalidWriteIds)) - for iter557 in self.invalidWriteIds: - oprot.writeI64(iter557) + for iter566 in self.invalidWriteIds: + oprot.writeI64(iter566) oprot.writeListEnd() oprot.writeFieldEnd() if self.minOpenWriteId is not None: @@ -12657,11 +13018,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype561, _size558) = iprot.readListBegin() - for _i562 in xrange(_size558): - _elem563 = TableValidWriteIds() - _elem563.read(iprot) - self.tblValidWriteIds.append(_elem563) + (_etype570, _size567) = iprot.readListBegin() + for _i571 in xrange(_size567): + _elem572 = TableValidWriteIds() + _elem572.read(iprot) + self.tblValidWriteIds.append(_elem572) iprot.readListEnd() else: iprot.skip(ftype) @@ -12678,8 +13039,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter564 in self.tblValidWriteIds: - 
iter564.write(oprot) + for iter573 in self.tblValidWriteIds: + iter573.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12755,10 +13116,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.txnIds = [] - (_etype568, _size565) = iprot.readListBegin() - for _i569 in xrange(_size565): - _elem570 = iprot.readI64() - self.txnIds.append(_elem570) + (_etype577, _size574) = iprot.readListBegin() + for _i578 in xrange(_size574): + _elem579 = iprot.readI64() + self.txnIds.append(_elem579) iprot.readListEnd() else: iprot.skip(ftype) @@ -12770,11 +13131,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.srcTxnToWriteIdList = [] - (_etype574, _size571) = iprot.readListBegin() - for _i575 in xrange(_size571): - _elem576 = TxnToWriteId() - _elem576.read(iprot) - self.srcTxnToWriteIdList.append(_elem576) + (_etype583, _size580) = iprot.readListBegin() + for _i584 in xrange(_size580): + _elem585 = TxnToWriteId() + _elem585.read(iprot) + self.srcTxnToWriteIdList.append(_elem585) iprot.readListEnd() else: iprot.skip(ftype) @@ -12799,8 +13160,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter577 in self.txnIds: - oprot.writeI64(iter577) + for iter586 in self.txnIds: + oprot.writeI64(iter586) oprot.writeListEnd() oprot.writeFieldEnd() if self.replPolicy is not None: @@ -12810,8 +13171,8 @@ def write(self, oprot): if self.srcTxnToWriteIdList is not None: oprot.writeFieldBegin('srcTxnToWriteIdList', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.srcTxnToWriteIdList)) - for iter578 in self.srcTxnToWriteIdList: - iter578.write(oprot) + for iter587 in self.srcTxnToWriteIdList: + iter587.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12953,11 +13314,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnToWriteIds = [] - (_etype582, 
_size579) = iprot.readListBegin() - for _i583 in xrange(_size579): - _elem584 = TxnToWriteId() - _elem584.read(iprot) - self.txnToWriteIds.append(_elem584) + (_etype591, _size588) = iprot.readListBegin() + for _i592 in xrange(_size588): + _elem593 = TxnToWriteId() + _elem593.read(iprot) + self.txnToWriteIds.append(_elem593) iprot.readListEnd() else: iprot.skip(ftype) @@ -12974,8 +13335,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter585 in self.txnToWriteIds: - iter585.write(oprot) + for iter594 in self.txnToWriteIds: + iter594.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13203,11 +13564,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype589, _size586) = iprot.readListBegin() - for _i590 in xrange(_size586): - _elem591 = LockComponent() - _elem591.read(iprot) - self.component.append(_elem591) + (_etype598, _size595) = iprot.readListBegin() + for _i599 in xrange(_size595): + _elem600 = LockComponent() + _elem600.read(iprot) + self.component.append(_elem600) iprot.readListEnd() else: iprot.skip(ftype) @@ -13244,8 +13605,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter592 in self.component: - iter592.write(oprot) + for iter601 in self.component: + iter601.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -13943,11 +14304,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype596, _size593) = iprot.readListBegin() - for _i597 in xrange(_size593): - _elem598 = ShowLocksResponseElement() - _elem598.read(iprot) - self.locks.append(_elem598) + (_etype605, _size602) = iprot.readListBegin() + for _i606 in xrange(_size602): + _elem607 = 
ShowLocksResponseElement() + _elem607.read(iprot) + self.locks.append(_elem607) iprot.readListEnd() else: iprot.skip(ftype) @@ -13964,8 +14325,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter599 in self.locks: - iter599.write(oprot) + for iter608 in self.locks: + iter608.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14180,20 +14541,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype603, _size600) = iprot.readSetBegin() - for _i604 in xrange(_size600): - _elem605 = iprot.readI64() - self.aborted.add(_elem605) + (_etype612, _size609) = iprot.readSetBegin() + for _i613 in xrange(_size609): + _elem614 = iprot.readI64() + self.aborted.add(_elem614) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype609, _size606) = iprot.readSetBegin() - for _i610 in xrange(_size606): - _elem611 = iprot.readI64() - self.nosuch.add(_elem611) + (_etype618, _size615) = iprot.readSetBegin() + for _i619 in xrange(_size615): + _elem620 = iprot.readI64() + self.nosuch.add(_elem620) iprot.readSetEnd() else: iprot.skip(ftype) @@ -14210,15 +14571,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter612 in self.aborted: - oprot.writeI64(iter612) + for iter621 in self.aborted: + oprot.writeI64(iter621) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter613 in self.nosuch: - oprot.writeI64(iter613) + for iter622 in self.nosuch: + oprot.writeI64(iter622) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14315,11 +14676,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: 
self.properties = {} - (_ktype615, _vtype616, _size614 ) = iprot.readMapBegin() - for _i618 in xrange(_size614): - _key619 = iprot.readString() - _val620 = iprot.readString() - self.properties[_key619] = _val620 + (_ktype624, _vtype625, _size623 ) = iprot.readMapBegin() + for _i627 in xrange(_size623): + _key628 = iprot.readString() + _val629 = iprot.readString() + self.properties[_key628] = _val629 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14356,9 +14717,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter621,viter622 in self.properties.items(): - oprot.writeString(kiter621) - oprot.writeString(viter622) + for kiter630,viter631 in self.properties.items(): + oprot.writeString(kiter630) + oprot.writeString(viter631) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14793,11 +15154,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = ShowCompactResponseElement() - _elem628.read(iprot) - self.compacts.append(_elem628) + (_etype635, _size632) = iprot.readListBegin() + for _i636 in xrange(_size632): + _elem637 = ShowCompactResponseElement() + _elem637.read(iprot) + self.compacts.append(_elem637) iprot.readListEnd() else: iprot.skip(ftype) @@ -14814,8 +15175,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter629 in self.compacts: - iter629.write(oprot) + for iter638 in self.compacts: + iter638.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14904,10 +15265,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype633, _size630) = iprot.readListBegin() - for _i634 in xrange(_size630): - 
_elem635 = iprot.readString() - self.partitionnames.append(_elem635) + (_etype642, _size639) = iprot.readListBegin() + for _i643 in xrange(_size639): + _elem644 = iprot.readString() + self.partitionnames.append(_elem644) iprot.readListEnd() else: iprot.skip(ftype) @@ -14945,8 +15306,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter636 in self.partitionnames: - oprot.writeString(iter636) + for iter645 in self.partitionnames: + oprot.writeString(iter645) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -15179,10 +15540,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.SET: self.tablesUsed = set() - (_etype640, _size637) = iprot.readSetBegin() - for _i641 in xrange(_size637): - _elem642 = iprot.readString() - self.tablesUsed.add(_elem642) + (_etype649, _size646) = iprot.readSetBegin() + for _i650 in xrange(_size646): + _elem651 = iprot.readString() + self.tablesUsed.add(_elem651) iprot.readSetEnd() else: iprot.skip(ftype) @@ -15221,8 +15582,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 4) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter643 in self.tablesUsed: - oprot.writeString(iter643) + for iter652 in self.tablesUsed: + oprot.writeString(iter652) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -15539,11 +15900,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype647, _size644) = iprot.readListBegin() - for _i648 in xrange(_size644): - _elem649 = NotificationEvent() - _elem649.read(iprot) - self.events.append(_elem649) + (_etype656, _size653) = iprot.readListBegin() + for _i657 in xrange(_size653): + _elem658 = NotificationEvent() + _elem658.read(iprot) + self.events.append(_elem658) iprot.readListEnd() else: iprot.skip(ftype) @@ 
-15560,8 +15921,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter650 in self.events: - iter650.write(oprot) + for iter659 in self.events: + iter659.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15858,30 +16219,30 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype654, _size651) = iprot.readListBegin() - for _i655 in xrange(_size651): - _elem656 = iprot.readString() - self.filesAdded.append(_elem656) + (_etype663, _size660) = iprot.readListBegin() + for _i664 in xrange(_size660): + _elem665 = iprot.readString() + self.filesAdded.append(_elem665) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype660, _size657) = iprot.readListBegin() - for _i661 in xrange(_size657): - _elem662 = iprot.readString() - self.filesAddedChecksum.append(_elem662) + (_etype669, _size666) = iprot.readListBegin() + for _i670 in xrange(_size666): + _elem671 = iprot.readString() + self.filesAddedChecksum.append(_elem671) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.subDirectoryList = [] - (_etype666, _size663) = iprot.readListBegin() - for _i667 in xrange(_size663): - _elem668 = iprot.readString() - self.subDirectoryList.append(_elem668) + (_etype675, _size672) = iprot.readListBegin() + for _i676 in xrange(_size672): + _elem677 = iprot.readString() + self.subDirectoryList.append(_elem677) iprot.readListEnd() else: iprot.skip(ftype) @@ -15902,22 +16263,22 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter669 in self.filesAdded: - oprot.writeString(iter669) + for iter678 in self.filesAdded: + oprot.writeString(iter678) oprot.writeListEnd() oprot.writeFieldEnd() if 
self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter670 in self.filesAddedChecksum: - oprot.writeString(iter670) + for iter679 in self.filesAddedChecksum: + oprot.writeString(iter679) oprot.writeListEnd() oprot.writeFieldEnd() if self.subDirectoryList is not None: oprot.writeFieldBegin('subDirectoryList', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.subDirectoryList)) - for iter671 in self.subDirectoryList: - oprot.writeString(iter671) + for iter680 in self.subDirectoryList: + oprot.writeString(iter680) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16076,10 +16437,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype675, _size672) = iprot.readListBegin() - for _i676 in xrange(_size672): - _elem677 = iprot.readString() - self.partitionVals.append(_elem677) + (_etype684, _size681) = iprot.readListBegin() + for _i685 in xrange(_size681): + _elem686 = iprot.readString() + self.partitionVals.append(_elem686) iprot.readListEnd() else: iprot.skip(ftype) @@ -16117,8 +16478,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter678 in self.partitionVals: - oprot.writeString(iter678) + for iter687 in self.partitionVals: + oprot.writeString(iter687) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -16270,10 +16631,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionVals = [] - (_etype682, _size679) = iprot.readListBegin() - for _i683 in xrange(_size679): - _elem684 = iprot.readString() - self.partitionVals.append(_elem684) + (_etype691, _size688) = iprot.readListBegin() + for _i692 in xrange(_size688): + _elem693 = iprot.readString() + self.partitionVals.append(_elem693) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -16310,8 +16671,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter685 in self.partitionVals: - oprot.writeString(iter685) + for iter694 in self.partitionVals: + oprot.writeString(iter694) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16505,12 +16866,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype687, _vtype688, _size686 ) = iprot.readMapBegin() - for _i690 in xrange(_size686): - _key691 = iprot.readI64() - _val692 = MetadataPpdResult() - _val692.read(iprot) - self.metadata[_key691] = _val692 + (_ktype696, _vtype697, _size695 ) = iprot.readMapBegin() + for _i699 in xrange(_size695): + _key700 = iprot.readI64() + _val701 = MetadataPpdResult() + _val701.read(iprot) + self.metadata[_key700] = _val701 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16532,9 +16893,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter693,viter694 in self.metadata.items(): - oprot.writeI64(kiter693) - viter694.write(oprot) + for kiter702,viter703 in self.metadata.items(): + oprot.writeI64(kiter702) + viter703.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -16604,10 +16965,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype698, _size695) = iprot.readListBegin() - for _i699 in xrange(_size695): - _elem700 = iprot.readI64() - self.fileIds.append(_elem700) + (_etype707, _size704) = iprot.readListBegin() + for _i708 in xrange(_size704): + _elem709 = iprot.readI64() + self.fileIds.append(_elem709) iprot.readListEnd() else: iprot.skip(ftype) @@ -16639,8 +17000,8 @@ def write(self, oprot): if self.fileIds is not None: 
oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter701 in self.fileIds: - oprot.writeI64(iter701) + for iter710 in self.fileIds: + oprot.writeI64(iter710) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -16714,11 +17075,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype703, _vtype704, _size702 ) = iprot.readMapBegin() - for _i706 in xrange(_size702): - _key707 = iprot.readI64() - _val708 = iprot.readString() - self.metadata[_key707] = _val708 + (_ktype712, _vtype713, _size711 ) = iprot.readMapBegin() + for _i715 in xrange(_size711): + _key716 = iprot.readI64() + _val717 = iprot.readString() + self.metadata[_key716] = _val717 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16740,9 +17101,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter709,viter710 in self.metadata.items(): - oprot.writeI64(kiter709) - oprot.writeString(viter710) + for kiter718,viter719 in self.metadata.items(): + oprot.writeI64(kiter718) + oprot.writeString(viter719) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -16803,10 +17164,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype714, _size711) = iprot.readListBegin() - for _i715 in xrange(_size711): - _elem716 = iprot.readI64() - self.fileIds.append(_elem716) + (_etype723, _size720) = iprot.readListBegin() + for _i724 in xrange(_size720): + _elem725 = iprot.readI64() + self.fileIds.append(_elem725) iprot.readListEnd() else: iprot.skip(ftype) @@ -16823,8 +17184,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter717 in self.fileIds: - oprot.writeI64(iter717) + for iter726 in self.fileIds: + 
oprot.writeI64(iter726) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16930,20 +17291,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype721, _size718) = iprot.readListBegin() - for _i722 in xrange(_size718): - _elem723 = iprot.readI64() - self.fileIds.append(_elem723) + (_etype730, _size727) = iprot.readListBegin() + for _i731 in xrange(_size727): + _elem732 = iprot.readI64() + self.fileIds.append(_elem732) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype727, _size724) = iprot.readListBegin() - for _i728 in xrange(_size724): - _elem729 = iprot.readString() - self.metadata.append(_elem729) + (_etype736, _size733) = iprot.readListBegin() + for _i737 in xrange(_size733): + _elem738 = iprot.readString() + self.metadata.append(_elem738) iprot.readListEnd() else: iprot.skip(ftype) @@ -16965,15 +17326,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter730 in self.fileIds: - oprot.writeI64(iter730) + for iter739 in self.fileIds: + oprot.writeI64(iter739) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter731 in self.metadata: - oprot.writeString(iter731) + for iter740 in self.metadata: + oprot.writeString(iter740) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -17081,10 +17442,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype735, _size732) = iprot.readListBegin() - for _i736 in xrange(_size732): - _elem737 = iprot.readI64() - self.fileIds.append(_elem737) + (_etype744, _size741) = iprot.readListBegin() + for _i745 in xrange(_size741): + _elem746 = iprot.readI64() + self.fileIds.append(_elem746) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -17101,8 +17462,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter738 in self.fileIds: - oprot.writeI64(iter738) + for iter747 in self.fileIds: + oprot.writeI64(iter747) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17331,11 +17692,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype742, _size739) = iprot.readListBegin() - for _i743 in xrange(_size739): - _elem744 = Function() - _elem744.read(iprot) - self.functions.append(_elem744) + (_etype751, _size748) = iprot.readListBegin() + for _i752 in xrange(_size748): + _elem753 = Function() + _elem753.read(iprot) + self.functions.append(_elem753) iprot.readListEnd() else: iprot.skip(ftype) @@ -17352,8 +17713,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter745 in self.functions: - iter745.write(oprot) + for iter754 in self.functions: + iter754.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17405,10 +17766,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype749, _size746) = iprot.readListBegin() - for _i750 in xrange(_size746): - _elem751 = iprot.readI32() - self.values.append(_elem751) + (_etype758, _size755) = iprot.readListBegin() + for _i759 in xrange(_size755): + _elem760 = iprot.readI32() + self.values.append(_elem760) iprot.readListEnd() else: iprot.skip(ftype) @@ -17425,8 +17786,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter752 in self.values: - oprot.writeI32(iter752) + for iter761 in self.values: + oprot.writeI32(iter761) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ 
-17698,10 +18059,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype756, _size753) = iprot.readListBegin() - for _i757 in xrange(_size753): - _elem758 = iprot.readString() - self.tblNames.append(_elem758) + (_etype765, _size762) = iprot.readListBegin() + for _i766 in xrange(_size762): + _elem767 = iprot.readString() + self.tblNames.append(_elem767) iprot.readListEnd() else: iprot.skip(ftype) @@ -17733,8 +18094,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter759 in self.tblNames: - oprot.writeString(iter759) + for iter768 in self.tblNames: + oprot.writeString(iter768) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -17799,11 +18160,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype763, _size760) = iprot.readListBegin() - for _i764 in xrange(_size760): - _elem765 = Table() - _elem765.read(iprot) - self.tables.append(_elem765) + (_etype772, _size769) = iprot.readListBegin() + for _i773 in xrange(_size769): + _elem774 = Table() + _elem774.read(iprot) + self.tables.append(_elem774) iprot.readListEnd() else: iprot.skip(ftype) @@ -17820,8 +18181,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter766 in self.tables: - iter766.write(oprot) + for iter775 in self.tables: + iter775.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19029,44 +19390,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = WMPool() - _elem772.read(iprot) - self.pools.append(_elem772) + (_etype779, _size776) = iprot.readListBegin() + for _i780 in xrange(_size776): + _elem781 = WMPool() + 
_elem781.read(iprot) + self.pools.append(_elem781) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype776, _size773) = iprot.readListBegin() - for _i777 in xrange(_size773): - _elem778 = WMMapping() - _elem778.read(iprot) - self.mappings.append(_elem778) + (_etype785, _size782) = iprot.readListBegin() + for _i786 in xrange(_size782): + _elem787 = WMMapping() + _elem787.read(iprot) + self.mappings.append(_elem787) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype782, _size779) = iprot.readListBegin() - for _i783 in xrange(_size779): - _elem784 = WMTrigger() - _elem784.read(iprot) - self.triggers.append(_elem784) + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = WMTrigger() + _elem793.read(iprot) + self.triggers.append(_elem793) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype788, _size785) = iprot.readListBegin() - for _i789 in xrange(_size785): - _elem790 = WMPoolTrigger() - _elem790.read(iprot) - self.poolTriggers.append(_elem790) + (_etype797, _size794) = iprot.readListBegin() + for _i798 in xrange(_size794): + _elem799 = WMPoolTrigger() + _elem799.read(iprot) + self.poolTriggers.append(_elem799) iprot.readListEnd() else: iprot.skip(ftype) @@ -19087,29 +19448,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter791 in self.pools: - iter791.write(oprot) + for iter800 in self.pools: + iter800.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter792 in self.mappings: - iter792.write(oprot) + for iter801 in self.mappings: + iter801.write(oprot) oprot.writeListEnd() 
oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter793 in self.triggers: - iter793.write(oprot) + for iter802 in self.triggers: + iter802.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter794 in self.poolTriggers: - iter794.write(oprot) + for iter803 in self.poolTriggers: + iter803.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19583,11 +19944,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype798, _size795) = iprot.readListBegin() - for _i799 in xrange(_size795): - _elem800 = WMResourcePlan() - _elem800.read(iprot) - self.resourcePlans.append(_elem800) + (_etype807, _size804) = iprot.readListBegin() + for _i808 in xrange(_size804): + _elem809 = WMResourcePlan() + _elem809.read(iprot) + self.resourcePlans.append(_elem809) iprot.readListEnd() else: iprot.skip(ftype) @@ -19604,8 +19965,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter801 in self.resourcePlans: - iter801.write(oprot) + for iter810 in self.resourcePlans: + iter810.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19909,20 +20270,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype805, _size802) = iprot.readListBegin() - for _i806 in xrange(_size802): - _elem807 = iprot.readString() - self.errors.append(_elem807) + (_etype814, _size811) = iprot.readListBegin() + for _i815 in xrange(_size811): + _elem816 = iprot.readString() + self.errors.append(_elem816) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: 
self.warnings = [] - (_etype811, _size808) = iprot.readListBegin() - for _i812 in xrange(_size808): - _elem813 = iprot.readString() - self.warnings.append(_elem813) + (_etype820, _size817) = iprot.readListBegin() + for _i821 in xrange(_size817): + _elem822 = iprot.readString() + self.warnings.append(_elem822) iprot.readListEnd() else: iprot.skip(ftype) @@ -19939,15 +20300,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter814 in self.errors: - oprot.writeString(iter814) + for iter823 in self.errors: + oprot.writeString(iter823) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter815 in self.warnings: - oprot.writeString(iter815) + for iter824 in self.warnings: + oprot.writeString(iter824) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20524,11 +20885,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype819, _size816) = iprot.readListBegin() - for _i820 in xrange(_size816): - _elem821 = WMTrigger() - _elem821.read(iprot) - self.triggers.append(_elem821) + (_etype828, _size825) = iprot.readListBegin() + for _i829 in xrange(_size825): + _elem830 = WMTrigger() + _elem830.read(iprot) + self.triggers.append(_elem830) iprot.readListEnd() else: iprot.skip(ftype) @@ -20545,8 +20906,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter822 in self.triggers: - iter822.write(oprot) + for iter831 in self.triggers: + iter831.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21730,11 +22091,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.cols = [] - (_etype826, _size823) = iprot.readListBegin() - 
for _i827 in xrange(_size823): - _elem828 = FieldSchema() - _elem828.read(iprot) - self.cols.append(_elem828) + (_etype835, _size832) = iprot.readListBegin() + for _i836 in xrange(_size832): + _elem837 = FieldSchema() + _elem837.read(iprot) + self.cols.append(_elem837) iprot.readListEnd() else: iprot.skip(ftype) @@ -21794,8 +22155,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter829 in self.cols: - iter829.write(oprot) + for iter838 in self.cols: + iter838.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.state is not None: @@ -22050,11 +22411,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.schemaVersions = [] - (_etype833, _size830) = iprot.readListBegin() - for _i834 in xrange(_size830): - _elem835 = SchemaVersionDescriptor() - _elem835.read(iprot) - self.schemaVersions.append(_elem835) + (_etype842, _size839) = iprot.readListBegin() + for _i843 in xrange(_size839): + _elem844 = SchemaVersionDescriptor() + _elem844.read(iprot) + self.schemaVersions.append(_elem844) iprot.readListEnd() else: iprot.skip(ftype) @@ -22071,8 +22432,8 @@ def write(self, oprot): if self.schemaVersions is not None: oprot.writeFieldBegin('schemaVersions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions)) - for iter836 in self.schemaVersions: - iter836.write(oprot) + for iter845 in self.schemaVersions: + iter845.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22557,11 +22918,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitions = [] - (_etype840, _size837) = iprot.readListBegin() - for _i841 in xrange(_size837): - _elem842 = Partition() - _elem842.read(iprot) - self.partitions.append(_elem842) + (_etype849, _size846) = iprot.readListBegin() + for _i850 in xrange(_size846): + _elem851 = Partition() + _elem851.read(iprot) + self.partitions.append(_elem851) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -22606,8 +22967,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter843 in self.partitions: - iter843.write(oprot) + for iter852 in self.partitions: + iter852.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environmentContext is not None: @@ -22759,10 +23120,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partVals = [] - (_etype847, _size844) = iprot.readListBegin() - for _i848 in xrange(_size844): - _elem849 = iprot.readString() - self.partVals.append(_elem849) + (_etype856, _size853) = iprot.readListBegin() + for _i857 in xrange(_size853): + _elem858 = iprot.readString() + self.partVals.append(_elem858) iprot.readListEnd() else: iprot.skip(ftype) @@ -22802,8 +23163,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter850 in self.partVals: - oprot.writeString(iter850) + for iter859 in self.partVals: + oprot.writeString(iter859) oprot.writeListEnd() oprot.writeFieldEnd() if self.newPart is not None: diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index e0c6c02715..cca47183d6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -1549,11 +1549,15 @@ class ColumnStatisticsObj COLNAME = 1 COLTYPE = 2 STATSDATA = 3 + ISSTATSCOMPLIANT = 4 + WRITEID = 5 FIELDS = { COLNAME => {:type => ::Thrift::Types::STRING, :name => 'colName'}, COLTYPE => {:type => ::Thrift::Types::STRING, :name => 'colType'}, - STATSDATA => {:type => ::Thrift::Types::STRUCT, :name => 'statsData', :class => 
::ColumnStatisticsData} + STATSDATA => {:type => ::Thrift::Types::STRUCT, :name => 'statsData', :class => ::ColumnStatisticsData}, + ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :optional => true} } def struct_fields; FIELDS; end @@ -1600,12 +1604,10 @@ class ColumnStatistics include ::Thrift::Struct, ::Thrift::Struct_Union STATSDESC = 1 STATSOBJ = 2 - ISSTATSCOMPLIANT = 3 FIELDS = { STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc}, - STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, - ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} + STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}} } def struct_fields; FIELDS; end @@ -1622,12 +1624,10 @@ class AggrStats include ::Thrift::Struct, ::Thrift::Struct_Union COLSTATS = 1 PARTSFOUND = 2 - ISSTATSCOMPLIANT = 3 FIELDS = { COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, - PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}, - ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} + PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'} } def struct_fields; FIELDS; end @@ -1680,6 +1680,92 @@ class SetPartitionsStatsResponse ::Thrift::Struct.generate_accessors self end +class SetBasicStatsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DESC = 1 + ISVALID = 2 + LEGACYSTATS = 3 + WRITEID = 4 + VALIDWRITEIDLIST = 5 + + FIELDS = { + DESC => {:type => ::Thrift::Types::STRUCT, :name => 'desc', :class => ::ColumnStatisticsDesc}, 
+ ISVALID => {:type => ::Thrift::Types::BOOL, :name => 'isValid'}, + LEGACYSTATS => {:type => ::Thrift::Types::MAP, :name => 'legacyStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}, :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field desc is unset!') unless @desc + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field isValid is unset!') if @isValid.nil? + end + + ::Thrift::Struct.generate_accessors self +end + +class SetBasicStatsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + RESULT = 1 + + FIELDS = { + RESULT => {:type => ::Thrift::Types::BOOL, :name => 'result'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field result is unset!') if @result.nil? 
+ end + + ::Thrift::Struct.generate_accessors self +end + +class InvalidateColumnStatsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + CATNAME = 1 + DBNAME = 2 + TABLENAME = 3 + PARTNAME = 4 + WRITEID = 5 + + FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, + PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partName', :optional => true}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + end + + ::Thrift::Struct.generate_accessors self +end + +class InvalidateColumnStatsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + RESULT = 1 + + FIELDS = { + RESULT => {:type => ::Thrift::Types::BOOL, :name => 'result'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field result is unset!') if @result.nil? 
+ end + + ::Thrift::Struct.generate_accessors self +end + class Schema include ::Thrift::Struct, ::Thrift::Struct_Union FIELDSCHEMAS = 1 @@ -2133,11 +2219,9 @@ end class TableStatsResult include ::Thrift::Struct, ::Thrift::Struct_Union TABLESTATS = 1 - ISSTATSCOMPLIANT = 2 FIELDS = { - TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, - ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} + TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}} } def struct_fields; FIELDS; end @@ -2152,11 +2236,9 @@ end class PartitionsStatsResult include ::Thrift::Struct, ::Thrift::Struct_Union PARTSTATS = 1 - ISSTATSCOMPLIANT = 2 FIELDS = { - PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}, - ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true} + PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}} } def struct_fields; FIELDS; end diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 92424a43fe..6e903e6b0e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1814,6 +1814,63 @@ module ThriftHiveMetastore raise 
::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics_req failed: unknown result') end + def update_table_basic_statistics_req(req) + send_update_table_basic_statistics_req(req) + return recv_update_table_basic_statistics_req() + end + + def send_update_table_basic_statistics_req(req) + send_message('update_table_basic_statistics_req', Update_table_basic_statistics_req_args, :req => req) + end + + def recv_update_table_basic_statistics_req() + result = receive_message(Update_table_basic_statistics_req_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_table_basic_statistics_req failed: unknown result') + end + + def update_partition_basic_statistics_req(req) + send_update_partition_basic_statistics_req(req) + return recv_update_partition_basic_statistics_req() + end + + def send_update_partition_basic_statistics_req(req) + send_message('update_partition_basic_statistics_req', Update_partition_basic_statistics_req_args, :req => req) + end + + def recv_update_partition_basic_statistics_req() + result = receive_message(Update_partition_basic_statistics_req_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_basic_statistics_req failed: unknown result') + end + + def invalidate_all_column_statistics_req(req) + send_invalidate_all_column_statistics_req(req) + return recv_invalidate_all_column_statistics_req() + end + + def send_invalidate_all_column_statistics_req(req) + send_message('invalidate_all_column_statistics_req', Invalidate_all_column_statistics_req_args, :req => req) + end + + def recv_invalidate_all_column_statistics_req() + result = receive_message(Invalidate_all_column_statistics_req_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'invalidate_all_column_statistics_req failed: unknown result') + end + def get_table_column_statistics(db_name, tbl_name, col_name) send_get_table_column_statistics(db_name, tbl_name, col_name) return recv_get_table_column_statistics() @@ -5017,6 +5074,57 @@ module ThriftHiveMetastore write_result(result, oprot, 'update_partition_column_statistics_req', seqid) end + def process_update_table_basic_statistics_req(seqid, iprot, oprot) + args = read_args(iprot, Update_table_basic_statistics_req_args) + result = Update_table_basic_statistics_req_result.new() + begin + result.success = @handler.update_table_basic_statistics_req(args.req) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + rescue ::InvalidInputException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'update_table_basic_statistics_req', seqid) + end + + def process_update_partition_basic_statistics_req(seqid, iprot, oprot) + args = read_args(iprot, 
Update_partition_basic_statistics_req_args) + result = Update_partition_basic_statistics_req_result.new() + begin + result.success = @handler.update_partition_basic_statistics_req(args.req) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + rescue ::InvalidInputException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'update_partition_basic_statistics_req', seqid) + end + + def process_invalidate_all_column_statistics_req(seqid, iprot, oprot) + args = read_args(iprot, Invalidate_all_column_statistics_req_args) + result = Invalidate_all_column_statistics_req_result.new() + begin + result.success = @handler.invalidate_all_column_statistics_req(args.req) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + rescue ::InvalidInputException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'invalidate_all_column_statistics_req', seqid) + end + def process_get_table_column_statistics(seqid, iprot, oprot) args = read_args(iprot, Get_table_column_statistics_args) result = Get_table_column_statistics_result.new() @@ -10363,6 +10471,126 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Update_table_basic_statistics_req_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::SetBasicStatsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Update_table_basic_statistics_req_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SetBasicStatsResponse}, + O1 => {:type => 
::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Update_partition_basic_statistics_req_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::SetBasicStatsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Update_partition_basic_statistics_req_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SetBasicStatsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Invalidate_all_column_statistics_req_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::InvalidateColumnStatsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Invalidate_all_column_statistics_req_result + include ::Thrift::Struct, 
::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::InvalidateColumnStatsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Get_table_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 8a4bdd8ed8..c877cbaa09 100644 --- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -567,7 +567,9 @@ union ColumnStatisticsData { struct ColumnStatisticsObj { 1: required string colName, 2: required string colType, -3: required ColumnStatisticsData statsData +3: required ColumnStatisticsData statsData, +4: optional bool isStatsCompliant, +5: optional i64 writeId } struct ColumnStatisticsDesc { @@ -581,15 +583,12 @@ struct ColumnStatisticsDesc { struct ColumnStatistics { 1: required ColumnStatisticsDesc statsDesc, -2: required list statsObj, -3: optional bool isStatsCompliant // Are the stats isolation-level-compliant with the - // the calling query? 
+2: required list statsObj } struct AggrStats { 1: required list colStats, 2: required i64 partsFound, // number of partitions for which stats were found -3: optional bool isStatsCompliant } struct SetPartitionsStatsRequest { @@ -603,6 +602,30 @@ struct SetPartitionsStatsResponse { 1: required bool result; } +struct SetBasicStatsRequest { +1: required ColumnStatisticsDesc desc, // Column... is a misnomer. Applies to any stats. +2: required bool isValid, +3: optional map legacyStats, // this may be normalized later +4: optional i64 writeId=-1, // writeId for the current query that updates the stats +5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent +} + +struct SetBasicStatsResponse { +1: required bool result; +} + +struct InvalidateColumnStatsRequest { +1: optional string catName +2: required string dbName, +3: required string tableName, +4: optional string partName, +5: optional i64 writeId=-1 // writeId for the current query that updates the stats +} + +struct InvalidateColumnStatsResponse { +1: required bool result; +} + // schema of the table/query results etc. 
struct Schema { // column names, types, comments @@ -729,13 +752,11 @@ struct PartitionsByExprRequest { } struct TableStatsResult { - 1: required list tableStats, - 2: optional bool isStatsCompliant + 1: required list tableStats } struct PartitionsStatsResult { - 1: required map> partStats, - 2: optional bool isStatsCompliant + 1: required map> partStats } struct TableStatsRequest { @@ -2058,6 +2079,14 @@ service ThriftHiveMetastore extends fb303.FacebookService SetPartitionsStatsResponse update_partition_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) + SetBasicStatsResponse update_table_basic_statistics_req(1:SetBasicStatsRequest req) throws (1:NoSuchObjectException o1, + 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) + SetBasicStatsResponse update_partition_basic_statistics_req(1:SetBasicStatsRequest req) throws (1:NoSuchObjectException o1, + 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) + + InvalidateColumnStatsResponse invalidate_all_column_statistics_req(1:InvalidateColumnStatsRequest req) throws (1:NoSuchObjectException o1, + 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) + // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if // such statistics exists. 
If the required statistics doesn't exist, get APIs throw NoSuchObjectException diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java index 35be3c4d72..10e6d8ad22 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -17,38 +17,22 @@ */ package org.apache.hadoop.hive.common; -import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.TreeMap; -import com.google.common.collect.ImmutableList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.DeserializationContext; -import com.fasterxml.jackson.databind.JsonDeserializer; -import com.fasterxml.jackson.databind.JsonSerializer; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.SerializerProvider; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.google.common.collect.ImmutableList; /** * A class that defines the constant strings used by the statistics implementation. 
*/ - public class StatsSetupConst { protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName()); @@ -122,6 +106,7 @@ public String getAggregator(Configuration conf) { public static final List SUPPORTED_STATS = ImmutableList.of( NUM_FILES, ROW_COUNT, TOTAL_SIZE, RAW_DATA_SIZE, NUM_ERASURE_CODED_FILES); + public static final String COLUMN_STATS_ACCURATE_DEPRECATED = "COLUMN_STATS_ACCURATE"; /** * List of all statistics that need to be collected during query execution. These are * statistics that inherently require a scan of the data. @@ -147,14 +132,6 @@ public String getAggregator(Configuration conf) { // update should take place, such as with replication. public static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS"; - //This string constant will be persisted in metastore to indicate whether corresponding - //table or partition's statistics and table or partition's column statistics are accurate or not. - public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE"; - - public static final String COLUMN_STATS = "COLUMN_STATS"; - - public static final String BASIC_STATS = "BASIC_STATS"; - public static final String CASCADE = "CASCADE"; public static final String TRUE = "true"; @@ -163,9 +140,28 @@ public String getAggregator(Configuration conf) { // The parameter keys for the table statistics. Those keys are excluded from 'show create table' command output. 
public static final List TABLE_PARAMS_STATS_KEYS = ImmutableList.of( - COLUMN_STATS_ACCURATE, NUM_FILES, TOTAL_SIZE, ROW_COUNT, RAW_DATA_SIZE, NUM_PARTITIONS, - NUM_ERASURE_CODED_FILES); + NUM_FILES, TOTAL_SIZE, ROW_COUNT, RAW_DATA_SIZE, NUM_PARTITIONS, NUM_ERASURE_CODED_FILES); + + public static void setStatsStateForCreateTable(Map params) { + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + params.put(stat, "0"); + } + } + + + public static Map extractStats( + Map partParameters) { + Map map = new HashMap<>(); + for (String key : TABLE_PARAMS_STATS_KEYS) { + String val = partParameters.get(key); + if (val == null) continue; + map.put(key, val); + } + return map; + } + +/** TODO## move this for a conversion script private static class ColumnStatsAccurate { private static ObjectReader objectReader; private static ObjectWriter objectWriter; @@ -205,132 +201,8 @@ public Boolean deserialize(JsonParser jsonParser, @JsonSerialize(contentUsing = BooleanSerializer.class) @JsonDeserialize(contentUsing = BooleanDeserializer.class) TreeMap columnStats = new TreeMap<>(); - - } - - public static boolean areBasicStatsUptoDate(Map params) { - if (params == null) { - return false; - } - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - return stats.basicStats; - } - - public static boolean areColumnStatsUptoDate(Map params, String colName) { - if (params == null) { - return false; - } - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - return stats.columnStats.containsKey(colName); - } - - // It will only throw JSONException when stats.put(BASIC_STATS, TRUE) - // has duplicate key, which is not possible - // note that set basic stats false will wipe out column stats too. 
- public static void setBasicStatsState(Map params, String setting) { - if (setting.equals(FALSE)) { - if (params!=null && params.containsKey(COLUMN_STATS_ACCURATE)) { - params.remove(COLUMN_STATS_ACCURATE); - } - return; - } - if (params == null) { - throw new RuntimeException("params are null...cant set columnstatstate!"); - } - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - stats.basicStats = true; - try { - params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats)); - } catch (JsonProcessingException e) { - throw new RuntimeException("can't serialize column stats", e); - } - } - - public static void setColumnStatsState(Map params, List colNames) { - if (params == null) { - throw new RuntimeException("params are null...cant set columnstatstate!"); - } - if (colNames == null) { - return; - } - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - - for (String colName : colNames) { - if (!stats.columnStats.containsKey(colName)) { - stats.columnStats.put(colName, true); - } - } - try { - params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats)); - } catch (JsonProcessingException e) { - LOG.trace(e.getMessage()); - } - } - - public static boolean canColumnStatsMerge(Map params, String colName) { - if (params == null) { - return false; - } - // TODO: should this also check that the basic flag is valid? 
- ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - return stats.columnStats.containsKey(colName); - } - - public static void clearColumnStatsState(Map params) { - if (params == null) { - return; - } - - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - stats.columnStats.clear(); - - try { - params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats)); - } catch (JsonProcessingException e) { - LOG.trace(e.getMessage()); - } } - public static void removeColumnStatsState(Map params, List colNames) { - if (params == null) { - return; - } - try { - ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); - for (String string : colNames) { - stats.columnStats.remove(string); - } - params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats)); - } catch (JsonProcessingException e) { - LOG.trace(e.getMessage()); - } - } - - public static void setStatsStateForCreateTable(Map params, - List cols, String setting) { - if (TRUE.equals(setting)) { - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - params.put(stat, "0"); - } - } - setBasicStatsState(params, setting); - if (TRUE.equals(setting)) { - setColumnStatsState(params, cols); - } - } - - private static ColumnStatsAccurate parseStatsAcc(String statsAcc) { - if (statsAcc == null) { - return new ColumnStatsAccurate(); - } - try { - return ColumnStatsAccurate.objectReader.readValue(statsAcc); - } catch (Exception e) { - ColumnStatsAccurate ret = new ColumnStatsAccurate(); - if (TRUE.equalsIgnoreCase(statsAcc)) { - ret.basicStats = true; - } - return ret; - } - } + ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE)); +*/ } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 
69f6ed570e..0da313b756 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -318,7 +318,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam !isPartitionedTable) { Database db = msdb.getDatabase(catName, newDbName); // Update table stats. For partitioned table, we update stats in alterPartition() - MetaStoreUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext); + MetaStoreUtils.updateTableFsStatsSlow( + db, newt, wh, false, true, environmentContext); } if (isPartitionedTable) { @@ -465,14 +466,11 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str "Unable to alter partition because table or database does not exist."); } oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues()); - if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { + if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext) + && !MetaStoreUtils.isFastStatsSame(oldPart, new_part)) { // if stats are same, no need to update - if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) { - MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters()); - } else { - MetaStoreUtils.updatePartitionStatsFast( - new_part, tbl, wh, false, true, environmentContext, false); - } + MetaStoreUtils.updatePartitionStatsFast( + new_part, tbl, wh, false, true, environmentContext, false); } // PartitionView does not have SD. 
We do not need update its column stats @@ -709,14 +707,10 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); - if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) { - // Check if stats are same, no need to update - if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) { - MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters()); - } else { - MetaStoreUtils.updatePartitionStatsFast( - tmpPart, tbl, wh, false, true, environmentContext, false); - } + if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext) + && !MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) { + MetaStoreUtils.updatePartitionStatsFast( + tmpPart, tbl, wh, false, true, environmentContext, false); } // PartitionView does not have SD and we do not need to update its column stats @@ -848,7 +842,6 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa deletedCols.add(statsObj.getColName()); } } - StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols); } } } @@ -923,7 +916,6 @@ private ColumnStatistics updateOrGetPartitionColumnStats( deletedCols.add(statsObj.getColName()); } } - StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols); if (!newStatsObjs.isEmpty()) { partColStats.setStatsObj(newStatsObjs); newPartsColStats = partColStats; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index a53d4be03d..787a926817 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1862,7 +1862,7 @@ private void create_table_core(final 
RawStore ms, final Table tbl, } if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) && !MetaStoreUtils.isView(tbl)) { - MetaStoreUtils.updateTableStatsSlow(db, tbl, wh, madeDir, false, envContext); + MetaStoreUtils.updateTableFsStatsSlow(db, tbl, wh, madeDir, false, envContext); } // set create time @@ -2680,88 +2680,45 @@ public void drop_table_with_environment_context(final String dbname, final Strin } - private void updateStatsForTruncate(Map props, EnvironmentContext environmentContext) { - if (null == props) { - return; - } - for (String stat : StatsSetupConst.SUPPORTED_STATS) { - String statVal = props.get(stat); - if (statVal != null) { - //In the case of truncate table, we set the stats to be 0. - props.put(stat, "0"); - } - } - //first set basic stats to true - StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE); - environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK); - //then invalidate column stats - StatsSetupConst.clearColumnStatsState(props); - return; - } - - private void alterPartitionForTruncate(RawStore ms, String catName, String dbName, String tableName, - Table table, Partition partition, String validWriteIds, long writeId) throws Exception { - EnvironmentContext environmentContext = new EnvironmentContext(); - updateStatsForTruncate(partition.getParameters(), environmentContext); - - if (!transactionalListeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.ALTER_PARTITION, - new AlterPartitionEvent(partition, partition, table, true, true, this)); - } - - if (!listeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.ALTER_PARTITION, - new AlterPartitionEvent(partition, partition, table, true, true, this)); - } - - if (writeId > 0) { - partition.setWriteId(writeId); - } - alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition, - environmentContext, this, validWriteIds); - } - private void 
alterTableStatsForTruncate(RawStore ms, String catName, String dbName, String tableName, Table table, List partNames, String validWriteIds, long writeId) throws Exception { - if (partNames == null) { - if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { - alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition, - validWriteIds, writeId); + ms.openTransaction(); + boolean isOk = false; + try { + getMS().invalidateAllColumnStatistics(catName, dbName, tableName, partNames, writeId); + if (partNames == null) { + if (0 != table.getPartitionKeysSize()) { + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { + getMS().alterPartitionBasicStats(catName, dbName, tableName, partition.getValues(), + generateStatsForTruncate(), true, writeId, validWriteIds); + } + } else { + getMS().alterTableBasicStats(catName, dbName, tableName, + generateStatsForTruncate(), true, writeId, validWriteIds); } } else { - EnvironmentContext environmentContext = new EnvironmentContext(); - updateStatsForTruncate(table.getParameters(), environmentContext); - - if (!transactionalListeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.ALTER_TABLE, - new AlterTableEvent(table, table, true, true, this)); + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { + getMS().alterPartitionBasicStats(catName, dbName, tableName, partition.getValues(), + generateStatsForTruncate(), true, writeId, validWriteIds); } - - if (!listeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.ALTER_TABLE, - new AlterTableEvent(table, table, true, true, this)); - } - - // TODO: this should actually pass thru and set writeId for txn stats. 
- if (writeId > 0) { - table.setWriteId(writeId); - } - alterHandler.alterTable(ms, wh, catName, dbName, tableName, table, - environmentContext, this, validWriteIds); } - } else { - for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { - alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition, - validWriteIds, writeId); + isOk = true; + } finally { + if (isOk) { + ms.commitTransaction(); + } else { + ms.rollbackTransaction(); } } - return; + } + + private Map generateStatsForTruncate() { + Map zeroStats = new HashMap<>(); + for (String stat : StatsSetupConst.SUPPORTED_STATS) { + zeroStats.put(stat, "0"); + } + return zeroStats; } private List getLocationsForTruncate(final RawStore ms, @@ -5720,9 +5677,21 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro // is currently only done on metastore size (see set_aggr...). // For some optimizations we might make use of incorrect stats that are "better than // nothing", so this may change in future. - result = new TableStatsResult((cs == null || cs.getStatsObj() == null - || (cs.isSetIsStatsCompliant() && !cs.isIsStatsCompliant())) - ? Lists.newArrayList() : cs.getStatsObj()); + // Also we don't return stats for a subset of the columns. That would have been easy, + // but the calling code might not expect this for now. 
+ if (cs == null || cs.getStatsObj() == null) { + result = new TableStatsResult(Lists.newArrayList()); + } else { + for (ColumnStatisticsObj obj : cs.getStatsObj()) { + if (!obj.isSetIsStatsCompliant() || !obj.isIsStatsCompliant()) { + result = new TableStatsResult(Lists.newArrayList()); + break; + } + } + if (result == null) { + result = new TableStatsResult(cs.getStatsObj()); + } + } } finally { endFunction("get_table_statistics_req", result == null, null, tblName); } @@ -5792,7 +5761,16 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques // is currently only done on metastore size (see set_aggr...). // For some optimizations we might make use of incorrect stats that are "better than // nothing", so this may change in future. - if (stat.isSetIsStatsCompliant() && !stat.isIsStatsCompliant()) continue; + // Also we don't return stats for a subset of the columns. That would have been easy, + // but the calling code might not expect this for now. + boolean isSomeInaccurate = false; + for (ColumnStatisticsObj obj : stat.getStatsObj()) { + if (!obj.isSetIsStatsCompliant() || !obj.isIsStatsCompliant()) { + isSomeInaccurate = true; + break; + } + } + if (isSomeInaccurate) continue; map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj()); } } @@ -7549,7 +7527,7 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); startFunction("get_aggr_stats_for", ": table=" + - TableName.getQualified(catName, dbName, tblName)); + TableName.getQualified(catName, dbName, tblName) + "; " + request.getPartNames()); List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { @@ -7566,7 +7544,7 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce lowerCasePartNames, lowerCaseColNames, request.getValidWriteIdList()); return aggrStats; 
} finally { - endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); + endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); } } @@ -7662,40 +7640,12 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St for (Entry entry : newStatsMap.entrySet()) { ColumnStatistics csNew = entry.getValue(); ColumnStatistics csOld = oldStatsMap.get(entry.getKey()); - boolean isInvalidTxnStats = csOld != null - && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant(); - Partition part = mapToPart.get(entry.getKey()); - if (isInvalidTxnStats) { - // No columns can be merged; a shortcut for getMergableCols. - csNew.setStatsObj(Lists.newArrayList()); - } else { - // we first use getParameters() to prune the stats - MetaStoreUtils.getMergableCols(csNew, part.getParameters()); - // we merge those that can be merged - if (csOld != null && csOld.getStatsObjSize() != 0 && !csNew.getStatsObj().isEmpty()) { - MetaStoreUtils.mergeColStats(csNew, csOld); - } - } - - if (!csNew.getStatsObj().isEmpty()) { - // We don't short-circuit on errors here anymore. That can leave acid stats invalid. - result = updatePartitonColStatsInternal(t, csNew, - request.getValidWriteIdList(), request.getWriteId()) && result; - } else if (isInvalidTxnStats) { - // For now because the stats state is such as it is, we will invalidate everything. - // Overall the sematics here are not clear - we could invalide only some columns, but does - // that make any physical sense? Could query affect some columns but not others? - part.setWriteId(request.getWriteId()); - StatsSetupConst.clearColumnStatsState(part.getParameters()); - StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); - ms.alterPartition(catName, dbName, tableName, part.getValues(), part, - request.getValidWriteIdList()); - result = false; - } else { - // TODO: why doesn't the original call for non acid tables invalidate the stats? 
- LOG.debug("All the column stats " + csNew.getStatsDesc().getPartName() - + " are not accurate to merge."); - } + // Merge will propagate the valid flag from the old stats. + MetaStoreUtils.mergeColStats(csNew, csOld); + if (csNew.getStatsObj().isEmpty()) continue; + // We don't short-circuit on errors here anymore. That can leave acid stats invalid. + result = updatePartitonColStatsInternal(t, csNew, + request.getValidWriteIdList(), request.getWriteId()) && result; } ms.commitTransaction(); isCommitted = true; @@ -7711,45 +7661,19 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St private boolean updateTableColumnStatsWithMerge(String catName, String dbName, String tableName, List colNames, SetPartitionsStatsRequest request) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - ColumnStatistics firstColStats = request.getColStats().get(0); + ColumnStatistics csNew = request.getColStats().get(0); RawStore ms = getMS(); ms.openTransaction(); boolean isCommitted = false, result = false; try { ColumnStatistics csOld = ms.getTableColumnStatistics(catName, dbName, tableName, colNames, request.getValidWriteIdList()); - // we first use the valid stats list to prune the stats - boolean isInvalidTxnStats = csOld != null - && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant(); - if (isInvalidTxnStats) { - // No columns can be merged; a shortcut for getMergableCols. 
- firstColStats.setStatsObj(Lists.newArrayList()); - } else { - Table t = getTable(catName, dbName, tableName); - MetaStoreUtils.getMergableCols(firstColStats, t.getParameters()); - - // we merge those that can be merged - if (csOld != null && csOld.getStatsObjSize() != 0 && !firstColStats.getStatsObj().isEmpty()) { - MetaStoreUtils.mergeColStats(firstColStats, csOld); - } - } - - if (!firstColStats.getStatsObj().isEmpty()) { - result = updateTableColumnStatsInternal(firstColStats, + // Merge will propagate the valid flag from the old stats. + MetaStoreUtils.mergeColStats(csNew, csOld); + if (!csNew.getStatsObj().isEmpty()) { + // We don't short-circuit on errors here anymore. That can leave acid stats invalid. + result = updateTableColumnStatsInternal(csNew, request.getValidWriteIdList(), request.getWriteId()); - } else if (isInvalidTxnStats) { - // For now because the stats state is such as it is, we will invalidate everything. - // Overall the sematics here are not clear - we could invalide only some columns, but does - // that make any physical sense? Could query affect some columns but not others? - Table t = getTable(catName, dbName, tableName); - t.setWriteId(request.getWriteId()); - StatsSetupConst.clearColumnStatsState(t.getParameters()); - StatsSetupConst.setBasicStatsState(t.getParameters(), StatsSetupConst.FALSE); - ms.alterTable(catName, dbName, tableName, t, request.getValidWriteIdList()); - } else { - // TODO: why doesn't the original call for non acid tables invalidate the stats? 
- LOG.debug("All the column stats are not accurate to merge."); - result = true; } ms.commitTransaction(); @@ -8967,6 +8891,73 @@ public void add_runtime_stats(RuntimeStat stat) throws TException { endFunction("get_runtime_stats", ex == null, ex); } } + + @Override + public SetBasicStatsResponse update_table_basic_statistics_req( + SetBasicStatsRequest req) throws TException { + startFunction("update_table_basic_statistics_req"); + Exception ex = null; + try { + ColumnStatisticsDesc d = req.getDesc(); + if (d.isSetPartName()) { + throw new InvalidInputException("Partition was specified for a table-level call"); + } + getMS().alterTableBasicStats(d.getCatName(), d.getDbName(), d.getTableName(), + req.getLegacyStats(), req.isSetIsValid() && req.isIsValid(), + req.getWriteId(), req.getValidWriteIdList()); + return new SetBasicStatsResponse(true); + } catch (MetaException e) { + LOG.error("Caught exception", e); + ex = e; + throw e; + } finally { + endFunction("update_table_basic_statistics_req", ex == null, ex); + } + } + + @Override + public SetBasicStatsResponse update_partition_basic_statistics_req( + SetBasicStatsRequest req) throws TException { + startFunction("update_partition_basic_statistics_req"); + Exception ex = null; + try { + ColumnStatisticsDesc d = req.getDesc(); + if (!d.isSetPartName()) { + throw new InvalidInputException("Partition was not specified"); + } + List partVals = Warehouse.getPartValuesFromPartName(d.getPartName()); + getMS().alterPartitionBasicStats(d.getCatName(), d.getDbName(), d.getTableName(), partVals, + req.getLegacyStats(), req.isSetIsValid() && req.isIsValid(), + req.getWriteId(), req.getValidWriteIdList()); + return new SetBasicStatsResponse(true); + } catch (MetaException e) { + LOG.error("Caught exception", e); + ex = e; + throw e; + } finally { + endFunction("update_partition_basic_statistics_req", ex == null, ex); + } + } + + @Override + public InvalidateColumnStatsResponse invalidate_all_column_statistics_req( + 
InvalidateColumnStatsRequest req) throws NoSuchObjectException, + InvalidObjectException, MetaException, InvalidInputException, TException { + startFunction("invalidate_all_column_statistics_req"); + Exception ex = null; + try { + getMS().invalidateAllColumnStatistics( + req.getCatName(), req.getDbName(), req.getTableName(), + Lists.newArrayList(req.getPartName()), req.getWriteId()); + return new InvalidateColumnStatsResponse(true); + } catch (MetaException e) { + LOG.error("Caught exception", e); + ex = e; + throw e; + } finally { + endFunction("invalidate_all_column_statistics_req", ex == null, ex); + } + } } private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf) diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 91c86a749c..87b24623c5 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -433,6 +433,32 @@ public void alter_table(String catName, String dbName, String tbl_name, Table ne client.alter_table_req(req); } + @Override + public void alterTableBasicStats(String catName, String dbName, String tblName, boolean isValid, + Map basicStats, long writeId, String validWriteIds) throws TException { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, dbName, tblName); + desc.setCatName(catName); + SetBasicStatsRequest req = new SetBasicStatsRequest(desc, isValid); + req.setValidWriteIdList(validWriteIds); + req.setWriteId(writeId); + req.setLegacyStats(basicStats); + client.update_table_basic_statistics_req(req); + } + + @Override + public void alterPartitionBasicStats(String catName, String dbName, String tblName, + String partName, boolean isValid, Map 
basicStats, + long writeId, String validWriteIds) throws TException { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tblName); + desc.setCatName(catName); + desc.setPartName(partName); + SetBasicStatsRequest req = new SetBasicStatsRequest(desc, isValid); + req.setValidWriteIdList(validWriteIds); + req.setWriteId(writeId); + req.setLegacyStats(basicStats); + client.update_partition_basic_statistics_req(req); + } + @Deprecated @Override public void renamePartition(final String dbname, final String tableName, final List part_vals, @@ -3575,4 +3601,16 @@ public void addRuntimeStat(RuntimeStat stat) throws TException { req.setMaxCreateTime(maxCreateTime); return client.get_runtime_stats(req); } + + @Override + public void invalidateAllColumnStats( + String catName, String dbName, String tableName, String partName, long writeId) throws TException { + InvalidateColumnStatsRequest req = new InvalidateColumnStatsRequest(dbName, tableName); + req.setCatName(catName); + req.setWriteId(writeId); + if (partName != null) { + req.setPartName(partName); + } + client.invalidate_all_column_statistics_req(req); + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 91405b9a33..07eada7c9f 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -3748,4 +3748,15 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** Reads runtime statistics. 
*/ List getRuntimeStats(int maxWeight, int maxCreateTime) throws TException; + + void alterTableBasicStats(String catName, String dbName, String tblName, + boolean isValid, Map basicStats, long writeId, + String validWriteIds) throws TException; + + void alterPartitionBasicStats(String catName, String dbName, String tblName, + String partName, boolean isValid, Map basicStats, + long writeId, String validWriteIds) throws TException; + + void invalidateAllColumnStats(String catName, String dbName, + String tableName, String partName, long writeId) throws TException; } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 95d9fe21bd..b3e09e4e47 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -1430,7 +1430,8 @@ public ColumnStatistics getTableStats(final String catName, final String dbName, public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName, List partNames, List colNames, boolean useDensityFunctionForNDVEstimation, - double ndvTuner, boolean enableBitVector) throws MetaException { + double ndvTuner, boolean enableBitVector, String writeIdList) throws MetaException { + // TODO# use writeIdList - pass to aggregators if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval"); return new AggrStats(Collections.emptyList(), 0); // Nothing to aggregate @@ -1452,6 +1453,7 @@ public AggrStats aggrColStatsForPartitions(String catName, String dbName, String // Check the cache first colStatsAggrCached = aggrStatsCache.get(catName, dbName, tableName, colName, partNames); if (colStatsAggrCached != null) { + // 
TODO# this used to not check txn stats because the check was on partition level, needs to check now colStatsList.add(colStatsAggrCached.getColStats()); partsFound = colStatsAggrCached.getNumPartsCached(); } else { @@ -2770,11 +2772,12 @@ private void dropDanglingColumnDescriptors(List columnDescriptorIdList) return result; } - public Map> getColAndPartNamesWithStats( - String catName, String dbName, String tableName) throws MetaException { - // Could we also join with ACID tables to only get tables with outdated stats? + public Map> getColAndPartNamesWithAccurateStats( + String catName, String dbName, String tableName, String validWriteIds, + boolean isAccurateTarget) throws MetaException { String queryText = "SELECT DISTINCT " + PARTITIONS + ".\"PART_NAME\", " + PART_COL_STATS - + ".\"COLUMN_NAME\" FROM " + TBLS + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + + ".\"COLUMN_NAME\", " + PART_COL_STATS + ".\"WRITE_ID\", " + PART_COL_STATS + + ".\"STATS_ACCURATE\" FROM " + TBLS + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" INNER JOIN " + PARTITIONS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITIONS + ".\"TBL_ID\" INNER JOIN " + PART_COL_STATS + " ON " + PARTITIONS + ".\"PART_ID\" = " + PART_COL_STATS + ".\"PART_ID\" WHERE " + DBS + ".\"NAME\" = ? 
AND " @@ -2790,6 +2793,14 @@ private void dropDanglingColumnDescriptors(List columnDescriptorIdList) String lastPartName = null; List cols = null; for (Object[] line : sqlResult) { + boolean isAccurate = extractSqlBoolean(line[3]); + long writeId = 0; + if (validWriteIds != null) { + writeId = extractSqlLong(line[2]); + isAccurate = ObjectStore.isCurrentStatsValidForTheQuery( + conf, isAccurate, writeId, validWriteIds, false); + } + if (isAccurate != isAccurateTarget) continue; String col = extractSqlString(line[1]); String part = extractSqlString(line[0]); if (!part.equals(lastPartName)) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 8af164efc9..7e6deb683b 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -54,6 +54,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; +import java.util.stream.Collectors; import javax.jdo.JDOCanRetryException; import javax.jdo.JDODataStoreException; @@ -1201,9 +1202,11 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException openTransaction(); mtbl = convertToMTable(tbl); + mtbl.setAreStatsAccurate(tbl.isSetIsStatsCompliant() && tbl.isIsStatsCompliant()); if (TxnUtils.isTransactionalTable(tbl)) { mtbl.setWriteId(tbl.getWriteId()); } + pm.makePersistent(mtbl); if (tbl.getCreationMetadata() != null) { @@ -1433,20 +1436,13 @@ public Table getTable(String catName, String dbName, String tableName, // check whether the current version table statistics // in the metastore comply with the client query's snapshot isolation. // Note: a partitioned table has table stats and table snapshot in MPartiiton. 
- if (writeIdList != null) { - boolean isTxn = tbl != null && TxnUtils.isTransactionalTable(tbl); - if (isTxn && !areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); - } else if (isTxn && tbl.getPartitionKeysSize() == 0) { - if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) { - tbl.setIsStatsCompliant(true); - } else { - tbl.setIsStatsCompliant(false); - // Do not make persistent the following state since it is the query specific (not global). - StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); - } + tbl.setIsStatsCompliant(true); + boolean isTxn = tbl != null && TxnUtils.isTransactionalTable(tbl); + if (isTxn) { + if (!areTxnStatsSupported || writeIdList == null) { + tbl.setIsStatsCompliant(false); + } else if (tbl.getPartitionKeysSize() == 0) { + tbl.setIsStatsCompliant(isCurrentStatsValidForTheQuery(mtable, writeIdList, false)); } } commited = commitTransaction(); @@ -1495,14 +1491,16 @@ public Table getTable(String catName, String dbName, String tableName, } @Override - public Map> getPartitionColsWithStats(String catName, String dbName, String tableName) + public Map> getPartitionColsWithAccurateStats( + String catName, String dbName, String tableName, String validWriteIds, boolean isAccurate) throws MetaException, NoSuchObjectException { return new GetHelper>>(catName, dbName, null, true, false) { @Override protected Map> getSqlResult( GetHelper>> ctx) throws MetaException { try { - return directSql.getColAndPartNamesWithStats(catName, dbName, tableName); + return directSql.getColAndPartNamesWithAccurateStats( + catName, dbName, tableName, validWriteIds, isAccurate); } catch (Throwable ex) { LOG.error("DirectSQL failed", ex); throw new MetaException(ex.getMessage()); @@ -1942,6 +1940,7 @@ private MTable convertToMTable(Table 
tbl) throws InvalidObjectException, MetaException { // NOTE: we don't set writeId in this method. Write ID is only set after validating the // existing write ID against the caller's valid list. + // Similarly we don't set isStatsCompliant. if (tbl == null) { return null; } @@ -2259,6 +2258,7 @@ public boolean addPartitions(String catName, String dbName, String tblName, List + dbName + "." + tblName + ": " + part); } MPartition mpart = convertToMPart(part, table, true); + mpart.setAreStatsAccurate(part.isSetIsStatsCompliant() && part.isIsStatsCompliant()); toPersist.add(mpart); int now = (int)(System.currentTimeMillis()/1000); @@ -2336,6 +2336,7 @@ public boolean addPartitions(String catName, String dbName, String tblName, if (isValidPartition(part, partitionKeys, ifNotExists)) { MPartition mpart = convertToMPart(part, table, true); + mpart.setAreStatsAccurate(part.isSetIsStatsCompliant() && part.isIsStatsCompliant()); pm.makePersistent(mpart); if (tabGrants != null) { for (MTablePrivilege tab : tabGrants) { @@ -2384,6 +2385,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException, catName, part.getDbName(), part.getTableName()); } MPartition mpart = convertToMPart(part, table, true); + mpart.setAreStatsAccurate(part.isSetIsStatsCompliant() && part.isIsStatsCompliant()); pm.makePersistent(mpart); int now = (int)(System.currentTimeMillis()/1000); @@ -2446,20 +2448,14 @@ public Partition getPartition(String catName, String dbName, String tableName, // If transactional table partition, check whether the current version partition // statistics in the metastore comply with the client query's snapshot isolation. long statsWriteId = mpart.getWriteId(); + part.setIsStatsCompliant(true); if (TxnUtils.isTransactionalTable(table.getParameters())) { - if (!areTxnStatsSupported) { + if (!areTxnStatsSupported || validWriteIds == null) { // Do not make persistent the following state since it is query specific (not global). 
- StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); + part.setIsStatsCompliant(false); LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); - } else if (validWriteIds != null) { - if (isCurrentStatsValidForTheQuery(part, statsWriteId, validWriteIds, false)) { - part.setIsStatsCompliant(true); - } else { - part.setIsStatsCompliant(false); - // Do not make persistent the following state since it is query specific (not global). - StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); - } + } else { + part.setIsStatsCompliant(isCurrentStatsValidForTheQuery(part, statsWriteId, validWriteIds, false)); } } return part; @@ -2570,6 +2566,7 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD) throws InvalidObjectException, MetaException { // NOTE: we don't set writeId in this method. Write ID is only set after validating the // existing write ID against the caller's valid list. + // Similarly we don't set isStatsCompliant. if (part == null) { return null; } @@ -4112,16 +4109,12 @@ public Table alterTable(String catName, String dbname, String name, Table newTab // For now only alter name, owner, parameters, cols, bucketcols are allowed oldt.setDatabase(newt.getDatabase()); oldt.setTableName(normalizeIdentifier(newt.getTableName())); - boolean isTxn = TxnUtils.isTransactionalTable(newTable); - if (isTxn && areTxnStatsSupported) { - // Transactional table is altered without a txn. Make sure there are no changes to the flag. 
- String errorMsg = verifyStatsChangeCtx(oldt.getParameters(), newTable.getParameters(), - newTable.getWriteId(), queryValidWriteIds, false); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } + if ((newTable.isSetWriteId() && oldt.getWriteId() != newTable.getWriteId()) + || (newTable.isSetIsStatsCompliant() && oldt.areStatsAccurate() != newTable.isIsStatsCompliant()) + || hasStatsParams(oldt.getParameters(), newTable.getParameters())) { + throw new MetaException("Stats cannot be modified by alterTable call"); } - boolean isToTxn = isTxn && !TxnUtils.isTransactionalTable(oldt.getParameters()); + oldt.setParameters(newt.getParameters()); oldt.setOwner(newt.getOwner()); oldt.setOwnerType(newt.getOwnerType()); @@ -4142,26 +4135,58 @@ public Table alterTable(String catName, String dbname, String name, Table newTab oldt.setViewExpandedText(newt.getViewExpandedText()); oldt.setRewriteEnabled(newt.isRewriteEnabled()); - // If transactional, update the stats state for the current Stats updater query. - // Don't update for conversion to acid - it doesn't modify stats but passes in qVWIds. - // The fact that it doesn't update stats is verified above. 
+ newTable = convertToTable(oldt); + + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return newTable; + } + + private static boolean hasStatsParams(Map op, Map np) { + if (hasParamSet(op, np, StatsSetupConst.COLUMN_STATS_ACCURATE_DEPRECATED)) { + return true; + } + for (String k : StatsSetupConst.SUPPORTED_STATS) { + if (hasParamSet(op, np, k)) return true; + } + return false; + } + + private static boolean hasParamSet( + Map op, Map np, String k) { + String ov = op.get(k), nv = np.get(k); + return (nv != null && !nv.equals(ov)); + } + + @Override + public Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, String validWriteIds) throws MetaException { + Table newTable; + boolean success = false; + try { + openTransaction(); + MTable oldt = ensureGetTableNormalized(catName, dbname, name); + + boolean isTxn = TxnUtils.isTransactionalTable(oldt.getParameters()); if (isTxn) { if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); - } else if (queryValidWriteIds != null && (!isToTxn || newTable.getWriteId() > 0)) { - // Check concurrent INSERT case and set false to the flag. - if (!isCurrentStatsValidForTheQuery(oldt, queryValidWriteIds, true)) { - StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + - dbname + "." + name + ". 
will be made persistent."); - } - assert newTable.getWriteId() > 0; - oldt.setWriteId(newTable.getWriteId()); + isAccurate = false; + } else if (writeId == 0 || validWriteIds == null) { + throw new MetaException(generateTxnStatsError(writeId, validWriteIds)); + } else if (!isCurrentStatsValidForTheQuery(oldt, validWriteIds, true)) { + isAccurate = false; } + oldt.setWriteId(writeId); } - newTable = convertToTable(oldt); + oldt.setAreStatsAccurate(isAccurate); + oldt.getParameters().putAll(stats); - // commit the changes + newTable = convertToTable(oldt); success = commitTransaction(); } finally { if (!success) { @@ -4171,30 +4196,65 @@ public Table alterTable(String catName, String dbname, String name, Table newTab return newTable; } - /** - * Verifies that the stats JSON string is unchanged for alter table (txn stats). - * @return Error message with the details of the change, or null if the value has not changed. - */ - private static String verifyStatsChangeCtx(Map oldP, Map newP, - long writeId, String validWriteIds, boolean isColStatsChange) { - if (validWriteIds != null && writeId > 0) return null; // We have txn context. - String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - // We don't need txn context is that stats state is not being changed. - if (StringUtils.isEmpty(oldVal) && StringUtils.isEmpty(newVal)) return null; - if (StringUtils.equalsIgnoreCase(oldVal, newVal)) { - if (!isColStatsChange) return null; // No change in col stats or parameters => assume no change. - // Col stats change while json stays "valid" implies stats change. If the new value is invalid, - // then we don't care. This is super ugly and idiotic. - // It will all become better when we get rid of JSON and store a flag and write ID per stats. 
- if (!StatsSetupConst.areBasicStatsUptoDate(newP)) return null; - } - // Some change to the stats state is being made; it can only be made with a write ID. - // Note - we could do this: if (writeId > 0 && (validWriteIds != null || !StatsSetupConst.areBasicStatsUptoDate(newP))) { return null; - // However the only way ID list can be absent is if WriteEntity wasn't generated for the alter, which is a separate bug. + private MTable ensureGetTableNormalized(String catName, String dbname, + String name) throws MetaException { + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); + MTable oldt = getMTable(catName, dbname, name); + if (oldt == null) { + throw new MetaException("table " + dbname + "." + name + " doesn't exist"); + } + return oldt; + } + + + @Override + public Partition alterPartitionBasicStats(String catName, String dbname, String name, List partVals, + Map stats, boolean isAccurate, long writeId, String validWriteIds) throws MetaException { + Partition newPart; + boolean success = false; + try { + openTransaction(); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); + MTable table = getMTable(catName, dbname, name); + if (table == null) { + throw new MetaException("table " + dbname + "." + name + " doesn't exist"); + } + MPartition oldp = getMPartition(catName, dbname, name, partVals); + if (oldp == null) { + throw new MetaException("Partition " + dbname + "." 
+ name + " " + partVals + " does not exist"); + } + boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); + if (isTxn) { + if (!areTxnStatsSupported) { + isAccurate = false; + } else if (writeId == 0 || validWriteIds == null) { + throw new MetaException(generateTxnStatsError(writeId, validWriteIds)); + } else if (!isCurrentStatsValidForTheQuery(oldp, validWriteIds, true)) { + isAccurate = false; + } + oldp.setWriteId(writeId); + } + oldp.setAreStatsAccurate(isAccurate); + oldp.getParameters().putAll(stats); + + newPart = convertToPart(oldp); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return newPart; + } + + private static String generateTxnStatsError(long writeId, String validWriteIds) { return "Cannot change stats state for a transactional table without providing the transactional" + " write state for verification (new write ID " + writeId + ", valid write IDs " - + validWriteIds + "; current state " + oldVal + "; new state " + newVal; + + validWriteIds; } @Override @@ -4253,14 +4313,10 @@ private Partition alterPartitionNoTxn(String catName, String dbname, String name } oldp.setValues(newp.getValues()); oldp.setPartitionName(newp.getPartitionName()); - boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); - if (isTxn && areTxnStatsSupported) { - // Transactional table is altered without a txn. Make sure there are no changes to the flag. 
- String errorMsg = verifyStatsChangeCtx(oldp.getParameters(), newPart.getParameters(), - newPart.getWriteId(), validWriteIds, false); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } + if ((newPart.isSetWriteId() && oldp.getWriteId() != newPart.getWriteId()) + || (newPart.isSetIsStatsCompliant() && oldp.areStatsAccurate() != newPart.isIsStatsCompliant()) + || hasStatsParams(oldp.getParameters(), newPart.getParameters())) { + throw new MetaException("Stats cannot be modified by alterPartition call"); } oldp.setParameters(newPart.getParameters()); if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) { @@ -4273,22 +4329,6 @@ private Partition alterPartitionNoTxn(String catName, String dbname, String name oldp.setLastAccessTime(newp.getLastAccessTime()); } - // If transactional, add/update the MUPdaterTransaction - // for the current updater query. - if (isTxn) { - if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); - } else if (validWriteIds != null && newPart.getWriteId() > 0) { - // Check concurrent INSERT case and set false to the flag. - if (!isCurrentStatsValidForTheQuery(oldp, validWriteIds, true)) { - StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + - dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent."); - } - oldp.setWriteId(newPart.getWriteId()); - } - } - oldCd.t = oldCD; return convertToPart(oldp); } @@ -8434,53 +8474,45 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, // So let's not use them anywhere unless absolutely necessary. String catName = statsDesc.isSetCatName() ? statsDesc.getCatName() : getDefaultCatalog(conf); Table table = ensureGetTable(catName, statsDesc.getDbName(), statsDesc.getTableName()); + Boolean isAccurate = null; // null means check each stat individually. 
+ boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); + if (!isTxn) { + isAccurate = true; + } else if (!areTxnStatsSupported) { + isAccurate = false; + } else if (writeId == 0 || validWriteIds == null) { + throw new MetaException(generateTxnStatsError(writeId, validWriteIds)); + } + List colNames = new ArrayList<>(); for (ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); } Map oldStats = getPartitionColStats(table, colNames); - for (ColumnStatisticsObj statsObj:statsObjs) { // We have to get mtable again because DataNucleus. MTableColumnStatistics mStatsObj = StatObjectConverter.convertToMTableColumnStatistics( ensureGetMTable(catName, statsDesc.getDbName(), statsDesc.getTableName()), statsDesc, statsObj); - writeMTableColumnStatistics(table, mStatsObj, oldStats.get(statsObj.getColName())); - // There is no need to add colname again, otherwise we will get duplicate colNames. - } - // TODO: (HIVE-20109) ideally the col stats stats should be in colstats, not in the table! - // Set the table properties - // No need to check again if it exists. - String dbname = table.getDbName(); - String name = table.getTableName(); - MTable oldt = getMTable(catName, dbname, name); - Map newParams = new HashMap<>(table.getParameters()); - StatsSetupConst.setColumnStatsState(newParams, colNames); - boolean isTxn = TxnUtils.isTransactionalTable(oldt.getParameters()); - if (isTxn) { - if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - } else { - String errorMsg = verifyStatsChangeCtx( - oldt.getParameters(), newParams, writeId, validWriteIds, true); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } - if (!isCurrentStatsValidForTheQuery(oldt, validWriteIds, true)) { - // Make sure we set the flag to invalid regardless of the current value. 
- StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " - + dbname + "." + name); - } - oldt.setWriteId(writeId); + // The stats are accurate if all of the following are true: + // 1) The caller thinks they are accurate. + // 2) There's no global flag preventing them from being accurate. + // 3) The stats txn state is valid. + boolean isCallerColAccurate = statsObj.isSetIsStatsCompliant() && statsObj.isIsStatsCompliant(); + boolean isColAccurate = isCallerColAccurate && ((isAccurate != null) ? isAccurate + : isCurrentStatsValidForTheQuery(conf, mStatsObj.areStatsAccurate(), + mStatsObj.getWriteId(), validWriteIds, true)); + mStatsObj.setAreStatsAccurate(isColAccurate); + if (isTxn) { + mStatsObj.setWriteId(writeId); } + writeMTableColumnStatistics(table, mStatsObj, oldStats.get(statsObj.getColName())); + // There is no need to add colname again, otherwise we will get duplicate colNames. } - oldt.setParameters(newParams); committed = commitTransaction(); - // TODO: similar to update...Part, this used to do "return committed;"; makes little sense. - return committed ? newParams : null; + return null; } finally { if (!committed) { rollbackTransaction(); @@ -8529,6 +8561,15 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, Partition partition = convertToPart(getMPartition( catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals)); List colNames = new ArrayList<>(); + Boolean isAccurate = null; // null means check each stat individually. 
+ boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); + if (!isTxn) { + isAccurate = true; + } else if (!areTxnStatsSupported) { + isAccurate = false; + } else if (writeId == 0 || validWriteIds == null) { + throw new MetaException(generateTxnStatsError(writeId, validWriteIds)); + } for(ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); @@ -8546,36 +8587,74 @@ private void writeMPartitionColumnStatistics(Table table, Partition partition, for (ColumnStatisticsObj statsObj : statsObjs) { MPartitionColumnStatistics mStatsObj = StatObjectConverter.convertToMPartitionColumnStatistics(mPartition, statsDesc, statsObj); + // The stats are accurate if all of the following are true: + // 1) The caller thinks they are accurate. + // 2) There's no global flag preventing them from being accurate. + // 3) The stats txn state is valid. + boolean isCallerColAccurate = statsObj.isSetIsStatsCompliant() && statsObj.isIsStatsCompliant(); + boolean isColAccurate = isCallerColAccurate && ((isAccurate != null) ? isAccurate + : isCurrentStatsValidForTheQuery(conf, mStatsObj.areStatsAccurate(), mStatsObj.getWriteId(), + validWriteIds, true)); + mStatsObj.setAreStatsAccurate(isColAccurate); + if (isTxn) { + mStatsObj.setWriteId(writeId); + } writeMPartitionColumnStatistics(table, partition, mStatsObj, oldStats.get(statsObj.getColName())); } - // TODO: (HIVE-20109) the col stats stats should be in colstats, not in the partition! 
- Map newParams = new HashMap<>(mPartition.getParameters()); - StatsSetupConst.setColumnStatsState(newParams, colNames); - boolean isTxn = TxnUtils.isTransactionalTable(table); - if (isTxn) { - if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + + committed = commitTransaction(); + // TODO# change return type based on the cache change + return null; + } finally { + if (!committed) { + rollbackTransaction(); + } + } + } + + + @Override + public Map invalidateAllColumnStatistics(String catName, String dbName, + String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException { + boolean committed = false; + + try { + openTransaction(); + MTable table = ensureGetTableNormalized(catName, dbName, tblName); + if (writeId <= 0 && TxnUtils.isTransactionalTable(table.getParameters())) { + throw new MetaException("writeId was not specified when invalidating transactional stats"); + } + Table t = convertToTable(table); + List allCols = t.getSd().getCols().stream().map(c -> c.getName()).collect( + Collectors.toList()); + QueryWrapper qw = new QueryWrapper(); + try { + if (table.getPartitionKeys().isEmpty()) { + List allStats = getMTableColumnStatistics(t, allCols, qw); + for (MTableColumnStatistics stat : allStats) { + stat.setWriteId(writeId); + stat.setAreStatsAccurate(false); + } } else { - String errorMsg = verifyStatsChangeCtx( - mPartition.getParameters(), newParams, writeId, validWriteIds, true); - if (errorMsg != null) { - throw new MetaException(errorMsg); + if (partNames == null) { + partNames = getPartitionNamesNoTxn(catName, dbName, tblName, (short)-1); } - if (!isCurrentStatsValidForTheQuery(mPartition, validWriteIds, true)) { - // Make sure we set the flag to invalid regardless of the current value. - StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " - + statsDesc.getDbName() + "." 
+ statsDesc.getTableName() + "." + statsDesc.getPartName()); + List allStats = getMPartitionColumnStatistics( + t, partNames, allCols, qw); + for (MPartitionColumnStatistics stat : allStats) { + stat.setWriteId(writeId); + stat.setAreStatsAccurate(false); } - mPartition.setWriteId(writeId); } + } finally { + qw.close(); } - mPartition.setParameters(newParams); committed = commitTransaction(); - // TODO: what is the "return committed;" about? would it ever return false without throwing? - return committed ? newParams : null; + // TODO# change return type based on the cache change + return null; } finally { if (!committed) { rollbackTransaction(); @@ -8677,16 +8756,16 @@ public ColumnStatistics getTableColumnStatistics( String writeIdList) throws MetaException, NoSuchObjectException { // If the current stats in the metastore doesn't comply with // the isolation level of the query, set No to the compliance flag. - Boolean isCompliant = null; - if (writeIdList != null) { - MTable table = this.getMTable(catName, dbName, tableName); - isCompliant = !TxnUtils.isTransactionalTable(table.getParameters()) - || (areTxnStatsSupported && isCurrentStatsValidForTheQuery(table, writeIdList, false)); - } + MTable table = this.getMTable(catName, dbName, tableName); + boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); ColumnStatistics stats = getTableColumnStatisticsInternal( catName, dbName, tableName, colNames, true, true); - if (stats != null && isCompliant != null) { - stats.setIsStatsCompliant(isCompliant); + if (stats != null) { + for (ColumnStatisticsObj cso : stats.getStatsObj()) { + if (!cso.isIsStatsCompliant()) continue; + cso.setIsStatsCompliant(!isTxn || (areTxnStatsSupported && writeIdList != null + && isCurrentStatsValidForTheQuery(conf, true, cso.getWriteId(), writeIdList, false))); + } } return stats; } @@ -8748,29 +8827,20 @@ protected ColumnStatistics getJdoResult( if (partNames == null && partNames.isEmpty()) { return null; } + MTable table = 
getMTable(catName, dbName, tableName); + boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); List allStats = getPartitionColumnStatisticsInternal( catName, dbName, tableName, partNames, colNames, true, true); - if (writeIdList != null) { - if (!areTxnStatsSupported) { - for (ColumnStatistics cs : allStats) { - cs.setIsStatsCompliant(false); - } - } else { - // TODO: this could be improved to get partitions in bulk - for (ColumnStatistics cs : allStats) { - MPartition mpart = getMPartition(catName, dbName, tableName, - Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName())); - if (mpart == null - || !isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) { - if (mpart != null) { - LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} " - + "(write ID {}) are not valid for current query ({} {})", dbName, tableName, - mpart.getPartitionName(), mpart.getWriteId(), writeIdList); - } - cs.setIsStatsCompliant(false); - } else { - cs.setIsStatsCompliant(true); - } + for (ColumnStatistics cs : allStats) { + for (ColumnStatisticsObj obj : cs.getStatsObj()) { + if (!obj.isIsStatsCompliant()) continue; // Don't check if it's already invalid. + if (!isTxn) { + obj.setIsStatsCompliant(true); + } else if (!areTxnStatsSupported || writeIdList == null) { + obj.setIsStatsCompliant(false); + } else { + obj.setIsStatsCompliant(isCurrentStatsValidForTheQuery(conf, + obj.isSetIsStatsCompliant() && obj.isIsStatsCompliant(), obj.getWriteId(), writeIdList, false)); } } } @@ -8829,39 +8899,33 @@ protected ColumnStatistics getJdoResult( public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, final List partNames, final List colNames, String writeIdList) throws MetaException, NoSuchObjectException { - // If the current stats in the metastore doesn't comply with - // the isolation level of the query, return null. 
- if (writeIdList != null) { - if (partNames == null && partNames.isEmpty()) { - return null; - } + if (partNames == null || partNames.isEmpty()) { + return null; + } - MTable table = getMTable(catName, dbName, tblName); - boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); - if (isTxn && !areTxnStatsSupported) { + MTable tbl = getMTable(catName, dbName, tblName); + if (TxnUtils.isTransactionalTable(tbl.getParameters())) { + if (!areTxnStatsSupported || writeIdList == null) { return null; } - - // Loop through the given "partNames" list - // checking isolation-level-compliance of each partition column stats. - for (String partName : partNames) { - MPartition mpart = getMPartition( - catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName)); - if (!isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) { - LOG.debug("The current metastore transactional partition column statistics " + - "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " is not valid " + - "for the current query."); - return null; - } - } } - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + return getAggrStatsInternal(catName, dbName, tblName, partNames, colNames, writeIdList); } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { + MTable tbl = getMTable(catName, dbName, tblName); + if (TxnUtils.isTransactionalTable(tbl.getParameters())) { + return null; + } + return getAggrStatsInternal(catName, dbName, tblName, partNames, colNames, null); + } + + private AggrStats getAggrStatsInternal(String catName, String dbName, + String tblName, final List partNames, final List colNames, String writeIdList) + throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = 
MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); @@ -8871,7 +8935,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam protected AggrStats getSqlResult(GetHelper ctx) throws MetaException { return directSql.aggrColStatsForPartitions(catName, dbName, tblName, partNames, - colNames, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); + colNames, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector, writeIdList); } @Override protected AggrStats getJdoResult(GetHelper ctx) @@ -12433,7 +12497,7 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { */ private boolean isCurrentStatsValidForTheQuery(MTable tbl, String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException { - return isCurrentStatsValidForTheQuery(conf, tbl.getParameters(), tbl.getWriteId(), + return isCurrentStatsValidForTheQuery(conf, tbl.areStatsAccurate(), tbl.getWriteId(), queryValidWriteIdList, isCompleteStatsWriter); } @@ -12454,25 +12518,25 @@ private boolean isCurrentStatsValidForTheQuery(MTable tbl, String queryValidWrit private boolean isCurrentStatsValidForTheQuery(MPartition part, String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException { - return isCurrentStatsValidForTheQuery(conf, part.getParameters(), part.getWriteId(), + return isCurrentStatsValidForTheQuery(conf, part.areStatsAccurate(), part.getWriteId(), queryValidWriteIdList, isCompleteStatsWriter); } private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId, String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException { - return isCurrentStatsValidForTheQuery(conf, part.getParameters(), partWriteId, + return isCurrentStatsValidForTheQuery(conf, part.isIsStatsCompliant(), partWriteId, queryValidWriteIdList, isCompleteStatsWriter); } // TODO: move to somewhere else public static boolean isCurrentStatsValidForTheQuery(Configuration conf, - Map statsParams, long 
statsWriteId, String queryValidWriteIdList, + boolean areStatsAccurate, long statsWriteId, String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException { // Note: can be changed to debug/info to verify the calls. - LOG.debug("isCurrentStatsValidForTheQuery with stats write ID {}; query {}; writer: {} params {}", - statsWriteId, queryValidWriteIdList, isCompleteStatsWriter, statsParams); + LOG.debug("isCurrentStatsValidForTheQuery with stats write ID {}; query {}; writer: {} accurate {}", + statsWriteId, queryValidWriteIdList, isCompleteStatsWriter, areStatsAccurate); // return true since the stats does not seem to be transactional. if (statsWriteId < 1) { return true; @@ -12480,7 +12544,7 @@ public static boolean isCurrentStatsValidForTheQuery(Configuration conf, // This COLUMN_STATS_ACCURATE(CSA) state checking also includes the case that the stats is // written by an aborted transaction but TXNS has no entry for the transaction // after compaction. Don't check for a complete stats writer - it may replace invalid stats. 
- if (!isCompleteStatsWriter && !StatsSetupConst.areBasicStatsUptoDate(statsParams)) { + if (!isCompleteStatsWriter && !areStatsAccurate) { return false; } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 8d647a0f6a..47e2fd79bc 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -1703,8 +1703,8 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio List getAllTableNamesForStats() throws MetaException, NoSuchObjectException; - Map> getPartitionColsWithStats(String catName, String dbName, - String tableName) throws MetaException, NoSuchObjectException; + Map> getPartitionColsWithAccurateStats(String catName, String dbName, + String tableName, String validWriteIds, boolean isAccurate) throws MetaException, NoSuchObjectException; /** * Remove older notification events. 
@@ -1719,4 +1719,17 @@ void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersio * @param tableName the name of the table for which the dump is being taken */ List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException; + + Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, + String validWriteIds) throws MetaException; + + Partition alterPartitionBasicStats(String catName, String dbname, + String name, List partVals, Map stats, + boolean isAccurate, long writeId, String validWriteIds) + throws MetaException; + + Map invalidateAllColumnStatistics(String catName, + String dbName, String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException; } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 7a0b21b258..9df7b224a9 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -133,6 +133,8 @@ public static MTableColumnStatistics convertToMTableColumnStatistics(MTable tabl public static void setFieldsIntoOldStats( MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStatsObj) { + oldStatsObj.setWriteId(mStatsObj.getWriteId()); + oldStatsObj.setAreStatsAccurate(mStatsObj.areStatsAccurate()); if (mStatsObj.getAvgColLen() != null) { oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); } @@ -177,6 +179,8 @@ public static void setFieldsIntoOldStats( public static void setFieldsIntoOldStats( MPartitionColumnStatistics mStatsObj, MPartitionColumnStatistics oldStatsObj) { + oldStatsObj.setWriteId(mStatsObj.getWriteId()); + 
oldStatsObj.setAreStatsAccurate(mStatsObj.areStatsAccurate()); if (mStatsObj.getAvgColLen() != null) { oldStatsObj.setAvgColLen(mStatsObj.getAvgColLen()); } @@ -224,6 +228,8 @@ public static ColumnStatisticsObj getTableColumnStatisticsObj( ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); statsObj.setColType(mStatsObj.getColType()); statsObj.setColName(mStatsObj.getColName()); + statsObj.setIsStatsCompliant(mStatsObj.areStatsAccurate()); + statsObj.setWriteId(mStatsObj.getWriteId()); String colType = mStatsObj.getColType().toLowerCase(); ColumnStatisticsData colStatsData = new ColumnStatisticsData(); @@ -401,6 +407,8 @@ public static ColumnStatisticsObj getPartitionColumnStatisticsObj( ColumnStatisticsObj statsObj = new ColumnStatisticsObj(); statsObj.setColType(mStatsObj.getColType()); statsObj.setColName(mStatsObj.getColName()); + statsObj.setIsStatsCompliant(mStatsObj.areStatsAccurate()); + statsObj.setWriteId(mStatsObj.getWriteId()); String colType = mStatsObj.getColType().toLowerCase(); ColumnStatisticsData colStatsData = new ColumnStatisticsData(); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index f73047f9ff..8f8273edf8 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -39,7 +39,6 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.DatabaseName; -import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; @@ -870,9 +869,13 @@ public Table getTable(String catName, 
String dbName, String tblName, tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); return rawStore.getTable(catName, dbName, tblName, validWriteIds); } - if (validWriteIds != null) { - tbl.setParameters(adjustStatsParamsForGet(tbl.getParameters(), - tbl.getParameters(), tbl.getWriteId(), validWriteIds)); + if (tbl.isIsStatsCompliant() && TxnUtils.isTransactionalTable(tbl)) { + if (!areTxnStatsSupported || validWriteIds == null || !ObjectStore.isCurrentStatsValidForTheQuery( + conf, true, tbl.getWriteId(), validWriteIds, false)) { + // Clone to avoid affecting the cached object. + tbl = new Table(tbl); + tbl.setIsStatsCompliant(false); + } } tbl.unsetPrivileges(); @@ -954,15 +957,19 @@ public Partition getPartition(String catName, String dbName, String tblName, return rawStore.getPartition( catName, dbName, tblName, part_vals, validWriteIds); } - if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { - // The table containing the partition is not yet loaded in cache - return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + if (table == null) { + // The table containing the partition is not yet loaded in cache + return rawStore.getPartition( + catName, dbName, tblName, part_vals, validWriteIds); + } + if (TxnUtils.isTransactionalTable(table) && part.isIsStatsCompliant()) { + if (!areTxnStatsSupported || validWriteIds == null || !ObjectStore.isCurrentStatsValidForTheQuery( + conf, true, part.getWriteId(), validWriteIds, false)) { + // Clone to avoid affecting the cached object. 
+ part = new Partition(part); + part.setIsStatsCompliant(false); } - part.setParameters(adjustStatsParamsForGet(table.getParameters(), - part.getParameters(), part.getWriteId(), validWriteIds)); } return part; @@ -1634,39 +1641,24 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN return partitions; } - // Note: ideally this should be above both CachedStore and ObjectStore. - private Map adjustStatsParamsForGet(Map tableParams, - Map params, long statsWriteId, String validWriteIds) throws MetaException { - if (!TxnUtils.isTransactionalTable(tableParams)) return params; // Not a txn table. - if (areTxnStatsSupported && ((validWriteIds == null) - || ObjectStore.isCurrentStatsValidForTheQuery( - conf, params, statsWriteId, validWriteIds, false))) { - // Valid stats are supported for txn tables, and either no verification was requested by the - // caller, or the verification has succeeded. - return params; - } - // Clone the map to avoid affecting the cached value. - params = new HashMap<>(params); - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); - return params; - } - // Note: ideally this should be above both CachedStore and ObjectStore. private ColumnStatistics adjustColStatForGet(Map tableParams, - Map params, ColumnStatistics colStat, long statsWriteId, - String validWriteIds) throws MetaException { - colStat.setIsStatsCompliant(true); + ColumnStatistics colStat, long statsWriteId, String validWriteIds) throws MetaException { if (!TxnUtils.isTransactionalTable(tableParams)) return colStat; // Not a txn table. - if (areTxnStatsSupported && ((validWriteIds == null) - || ObjectStore.isCurrentStatsValidForTheQuery( - conf, params, statsWriteId, validWriteIds, false))) { - // Valid stats are supported for txn tables, and either no verification was requested by the - // caller, or the verification has succeeded. - return colStat; - } - // Don't clone; ColStats objects are not cached, only their parts. 
- colStat.setIsStatsCompliant(false); + for (int i = 0; i < colStat.getStatsObj().size(); ++i) { + ColumnStatisticsObj cso = colStat.getStatsObj().get(i); + if (!cso.isIsStatsCompliant()) continue; // No need to re-check if it's already incorrect. + boolean isCompliant = areTxnStatsSupported && ((validWriteIds != null) + && ObjectStore.isCurrentStatsValidForTheQuery( + conf, true, cso.getWriteId(), validWriteIds, false)); + if (!isCompliant) { + // Clone to avoid affecting the cached object. + ColumnStatisticsObj clone = new ColumnStatisticsObj(cso); + clone.setIsStatsCompliant(false); + colStat.getStatsObj().set(i, clone); + } + } return colStat; } @@ -1724,7 +1716,7 @@ public ColumnStatistics getTableColumnStatistics( ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); List colStatObjs = sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames); - return adjustColStatForGet(table.getParameters(), table.getParameters(), + return adjustColStatForGet(table.getParameters(), new ColumnStatistics(csd, colStatObjs), table.getWriteId(), validWriteIds); } @@ -2593,8 +2585,33 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { } @Override - public Map> getPartitionColsWithStats(String catName, - String dbName, String tableName) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionColsWithStats(catName, dbName, tableName); + public Map> getPartitionColsWithAccurateStats(String catName, + String dbName, String tableName, String validWriteId, boolean isAccurate) throws MetaException, NoSuchObjectException { + return rawStore.getPartitionColsWithAccurateStats(catName, dbName, tableName, validWriteId, isAccurate); + } + + @Override + public Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, String validWriteIds) + throws MetaException { + // TODO# Auto-generated method stub + return rawStore.alterTableBasicStats(catName, 
dbname, name, stats, isAccurate, writeId, validWriteIds); + } + + @Override + public Partition alterPartitionBasicStats(String catName, String dbname, + String name, List partVals, Map stats, + boolean isAccurate, long writeId, String validWriteIds) + throws MetaException { + // Pass-through to the underlying store; CachedStore does not cache this yet. + return rawStore.alterPartitionBasicStats(catName, dbname, name, partVals, stats, isAccurate, writeId, validWriteIds); + } + + @Override + public Map invalidateAllColumnStatistics(String catName, + String dbName, String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException { + // Pass-through to the underlying store; CachedStore does not cache this yet. + return rawStore.invalidateAllColumnStatistics(catName, dbName, tblName, partNames, writeId); + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java index c18b4c79bf..9327b806bb 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java @@ -43,7 +43,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit colType = cso.getColType(); statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); BinaryColumnStatsData newData = cso.getStatsData().getBinaryStats(); if (aggregateData == null) { aggregateData = newData.deepCopy(); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java index 7630183180..f2f616267c 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java @@ -43,7 +43,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit colType = cso.getColType(); statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); BooleanColumnStatsData newData = cso.getStatsData().getBooleanStats(); if (aggregateData == null) { aggregateData = newData.deepCopy(); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java index 7aaab4a6b9..e78814de84 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java @@ -73,6 +73,7 @@ public static ColumnStatisticsObj newColumnStaticsObj(String colName, String col ColumnStatisticsData csd = new ColumnStatisticsData(); cso.setColName(colName); cso.setColType(colType); + cso.setIsStatsCompliant(true); // Note: aggregates must update this when aggregating. 
switch (type) { case BOOLEAN_STATS: csd.setBooleanStats(new BooleanColumnStatsData()); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java index e8ff513f50..a43cbd0ed1 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java @@ -61,7 +61,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); DateColumnStatsDataInspector dateColumnStats = (DateColumnStatsDataInspector) cso.getStatsData().getDateStats(); if (dateColumnStats.getNdvEstimator() == null) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java index ac7e8e35f9..414bdeb076 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java @@ -64,7 +64,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for 
column: {} is: {}", colName, doAllPartitionContainStats); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); DecimalColumnStatsDataInspector decimalColumnStatsData = (DecimalColumnStatsDataInspector) cso.getStatsData().getDecimalStats(); if (decimalColumnStatsData.getNdvEstimator() == null) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java index ece77dd51b..186d317a15 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java @@ -61,7 +61,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); DoubleColumnStatsDataInspector doubleColumnStatsData = (DoubleColumnStatsDataInspector) cso.getStatsData().getDoubleStats(); if (doubleColumnStatsData.getNdvEstimator() == null) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java index e6823d342a..3395d9501a 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java +++ 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java @@ -62,7 +62,9 @@ public ColumnStatisticsObj aggregate(List colStatsWit cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); + statsObj.setIsStatsCompliant(true); } + statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); LongColumnStatsDataInspector longColumnStatsData = (LongColumnStatsDataInspector) cso.getStatsData().getLongStats(); if (longColumnStatsData.getNdvEstimator() == null) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java index 9537647503..d617287846 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java @@ -62,7 +62,10 @@ public ColumnStatisticsObj aggregate(List colStatsWit cso.getStatsData().getSetField()); LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats); + statsObj.setIsStatsCompliant(true); } + // TODO# do all these places need to do a separate writeID check? 
+ statsObj.setIsStatsCompliant(statsObj.isIsStatsCompliant() && cso.isIsStatsCompliant()); StringColumnStatsDataInspector stringColumnStatsData = (StringColumnStatsDataInspector) cso.getStatsData().getStringStats(); if (stringColumnStatsData.getNdvEstimator() == null) { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java index 267c9e8e5a..a534cd0b2a 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java @@ -24,16 +24,17 @@ public class MPartition { private String partitionName; // partitionname ==> (key=value/)*(key=value) - private MTable table; + private MTable table; private List values; private int createTime; private int lastAccessTime; private MStorageDescriptor sd; private Map parameters; private long writeId; - + private boolean areStatsAccurate; + public MPartition() {} - + /** * @param partitionName * @param table @@ -152,6 +153,7 @@ public void setCreateTime(int createTime) { this.createTime = createTime; } + public long getWriteId() { return writeId; } @@ -159,4 +161,12 @@ public long getWriteId() { public void setWriteId(long writeId) { this.writeId = writeId; } + + public boolean areStatsAccurate() { + return areStatsAccurate; + } + + public void setAreStatsAccurate(boolean areStatsAccurate) { + this.areStatsAccurate = areStatsAccurate; + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java index 50d9c5b0cf..c19d196fa0 100644 --- 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java @@ -56,6 +56,10 @@ private Long numFalses; private long lastAnalyzed; + private long writeId; + + private boolean areStatsAccurate; + public MPartitionColumnStatistics() {} public String getTableName() { @@ -278,4 +282,20 @@ public void setDecimalHighValue(String decimalHighValue) { public void setBitVector(byte[] bitVector) { this.bitVector = bitVector; } + + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public boolean areStatsAccurate() { + return areStatsAccurate; + } + + public void setAreStatsAccurate(boolean areStatsAccurate) { + this.areStatsAccurate = areStatsAccurate; + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java index deeb97133d..33dc61948f 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java @@ -23,7 +23,7 @@ import java.util.Map; public class MTable { - + private String tableName; private MDatabase database; private MStorageDescriptor sd; @@ -39,6 +39,7 @@ private boolean rewriteEnabled; private String tableType; private long writeId; + private boolean areStatsAccurate; public MTable() {} @@ -280,4 +281,12 @@ public long getWriteId() { public void setWriteId(long writeId) { this.writeId = writeId; } + + public boolean areStatsAccurate() { + return areStatsAccurate; + } + + public void setAreStatsAccurate(boolean areStatsAccurate) { + this.areStatsAccurate = areStatsAccurate; + } } diff --git 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java index 731cd6f7fa..4a8c11c64c 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java @@ -53,6 +53,8 @@ private Long numTrues; private Long numFalses; private long lastAnalyzed; + private long writeId; + private boolean areStatsAccurate; public MTableColumnStatistics() {} @@ -269,4 +271,21 @@ public void setDecimalHighValue(String decimalHighValue) { public void setBitVector(byte[] bitVector) { this.bitVector = bitVector; } + + + public long getWriteId() { + return writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public boolean areStatsAccurate() { + return areStatsAccurate; + } + + public void setAreStatsAccurate(boolean areStatsAccurate) { + this.areStatsAccurate = areStatsAccurate; + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 1f559e95bb..e80223469d 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.metastore.txn; -import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.MetaException; 
@@ -328,7 +327,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { /** * This will remove an entry from the queue after * it has been compacted. - * + * * @param info info on the compaction entry to remove */ @Override @@ -993,7 +992,7 @@ private int getFailedCompactionRetention() { * User initiated compactions don't do this check. * * Do we allow compacting whole table (when it's partitioned)? No, though perhaps we should. - * That would be a meta operations, i.e. first find all partitions for this table (which have + * That would be a meta operations, i.e. first find all partitions for this table (which have * txn info) and schedule each compaction separately. This avoids complications in this logic. */ @Override diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 7cdcd626a7..696dbaa733 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -683,9 +683,15 @@ public static boolean isFastStatsSame(Partition oldPart, Partition newPart) { * @param forceRecompute Recompute stats even if the passed Table already has * these parameters set */ - public static void updateTableStatsSlow(Database db, Table tbl, Warehouse wh, + public static void updateTableFsStatsSlow(Database db, Table tbl, Warehouse wh, boolean newDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { + + // Note: 1) This updates FS stats only; those are never used for query results so it's ok to + // change them for transactional tables without txn context. + // 2) This also never alters stats-accurate state, because it doesn't check other stats, + // e.g. 
row counts, that are actually used for query results. + // DO_NOT_UPDATE_STATS is supposed to be a transient parameter that is only passed via RPC // We want to avoid this property from being persistent. // @@ -694,6 +700,7 @@ public static void updateTableStatsSlow(Database db, Table tbl, Warehouse wh, // // This problem was introduced by HIVE-10228. A better approach would be to pass the property // via the environment context. + // TODO: request, not env context. Map params = tbl.getParameters(); boolean updateStats = true; if ((params != null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)) { @@ -722,14 +729,7 @@ public static void updateTableStatsSlow(Database db, Table tbl, Warehouse wh, populateQuickStats(fileStatus, params); LOG.info("Updated size of table {} to {}", tbl.getTableName(), params.get(StatsSetupConst.TOTAL_SIZE)); - if (environmentContext != null - && environmentContext.isSetProperties() - && StatsSetupConst.TASK.equals(environmentContext.getProperties().get( - StatsSetupConst.STATS_GENERATED))) { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); - } else { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); - } + // Note: DO NOT set stats to accurate here. See above. } /** This method is invalid for MM and ACID tables unless fileStatus comes from AcidUtils. */ @@ -765,19 +765,10 @@ public static boolean areSameColumns(List oldCols, List - params) { - if (params == null) { - return; - } - if (environmentContext != null - && environmentContext.isSetProperties() - && StatsSetupConst.TASK.equals(environmentContext.getProperties().get( - StatsSetupConst.STATS_GENERATED))) { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); - } else { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); - } + // Another legacy method that needs to be converted to not use EC. 
+ public static boolean areStatsGenerated(EnvironmentContext ec) { + return ec != null && ec.isSetProperties() + && StatsSetupConst.TASK.equals(ec.getProperties().get(StatsSetupConst.STATS_GENERATED)); } /** @@ -828,7 +819,7 @@ public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionItera // TODO: this is invalid for ACID tables, and we cannot access AcidUtils here. populateQuickStats(fileStatus, params); LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE)); - updateBasicState(environmentContext, params); + // Note: we don't set stats to valid, because we only update a subset of the stats. } part.setParameters(params); return true; @@ -1043,24 +1034,11 @@ public static boolean partitionNameHasValidCharacters(List partVals, return getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null; } - public static void getMergableCols(ColumnStatistics csNew, Map parameters) { - List list = new ArrayList<>(); - for (int index = 0; index < csNew.getStatsObj().size(); index++) { - ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index); - // canColumnStatsMerge guarantees that it is accurate before we do merge - if (StatsSetupConst.canColumnStatsMerge(parameters, statsObjNew.getColName())) { - list.add(statsObjNew); - } - // in all the other cases, we can not merge - } - csNew.setStatsObj(list); - } - // this function will merge csOld into csNew. - public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) - throws InvalidObjectException { + public static void mergeColStats( + ColumnStatistics csNew, ColumnStatistics csOld) throws InvalidObjectException { List list = new ArrayList<>(); - if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) { + if (csOld != null && csNew.getStatsObj().size() != csOld.getStatsObjSize()) { // Some of the columns' stats are missing // This implies partition schema has changed. 
We will merge columns // present in both, overwrite stats for columns absent in metastore and @@ -1072,8 +1050,10 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) // In this case, we have to find out which columns can be merged. Map map = new HashMap<>(); // We build a hash map from colName to object for old ColumnStats. - for (ColumnStatisticsObj obj : csOld.getStatsObj()) { - map.put(obj.getColName(), obj); + if (csOld != null) { + for (ColumnStatisticsObj obj : csOld.getStatsObj()) { + map.put(obj.getColName(), obj); + } } for (int index = 0; index < csNew.getStatsObj().size(); index++) { ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index); @@ -1089,6 +1069,13 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) statsObjOld); merger.merge(statsObjNew, statsObjOld); } + // TODO: the old code seemingly assumed that stats will be valid if the pre-merge stats are absent. + // That doesn't seem to make sense... when called with merge, the stats would be partial. + // So, if we take the old stats state for some cols into account at all, and the state is + // absent for other columns, we should assume that the "old state" for them is invalid. + // TODO## make sure that the callers actually set to compliant + statsObjNew.setIsStatsCompliant( + statsObjNew.isIsStatsCompliant() && statsObjOld != null && statsObjOld.isIsStatsCompliant()); // If statsObjOld is not found, we just use statsObjNew as it is accurate. 
list.add(statsObjNew); } diff --git standalone-metastore/metastore-server/src/main/resources/package.jdo standalone-metastore/metastore-server/src/main/resources/package.jdo index 2a5f016b1f..a27febefb7 100644 --- standalone-metastore/metastore-server/src/main/resources/package.jdo +++ standalone-metastore/metastore-server/src/main/resources/package.jdo @@ -213,6 +213,9 @@ + + + @@ -495,6 +498,9 @@ + + + @@ -995,6 +1001,12 @@ + + + + + + @@ -1065,7 +1077,14 @@ + + + + + + + diff --git standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index c889bbdf96..f7352f6832 100644 --- standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql +++ standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0); +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0, "STATS_ACCURATE" CHAR(1) NOT NULL DEFAULT 'N'); CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); @@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, 
"OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "WRITE_ID" BIGINT DEFAULT 0); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "WRITE_ID" BIGINT DEFAULT 0, "STATS_ACCURATE" CHAR(1) NOT NULL DEFAULT 'N' ); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); @@ -106,7 +106,9 @@ CREATE TABLE "APP"."TAB_COL_STATS"( "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, - "BIT_VECTOR" BLOB + "BIT_VECTOR" BLOB, + "WRITE_ID" BIGINT DEFAULT 0, + "STATS_ACCURATE" CHAR(1) NOT NULL DEFAULT 'N' ); ); CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); @@ -155,7 +157,9 @@ CREATE TABLE "APP"."PART_COL_STATS"( "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, - "PART_ID" BIGINT NOT NULL + "PART_ID" BIGINT NOT NULL, + "WRITE_ID" BIGINT DEFAULT 0, + "STATS_ACCURATE" CHAR(1) NOT NULL DEFAULT 'N' ); CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); diff --git 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java deleted file mode 100644 index 24689215c2..0000000000 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.common; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; -import org.junit.Test; - -import com.google.common.collect.Lists; -import org.junit.experimental.categories.Category; - -@Category(MetastoreUnitTest.class) -public class TestStatsSetupConst { - - @Test - public void testSetBasicStatsState_missesUpgrade() { - Map params=new HashMap<>(); - params.put(StatsSetupConst.COLUMN_STATS_ACCURATE, "FALSE"); - StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); - assertEquals("{\"BASIC_STATS\":\"true\"}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - @Test - public void setColumnStatsState_camelcase() { - Map params=new HashMap<>(); - StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); - String val1 = params.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); - String val2 = params.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - assertEquals(val1, val2); - } - - @Test - public void testSetBasicStatsState_none() { - Map params=new HashMap<>(); - StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); - assertEquals("{\"BASIC_STATS\":\"true\"}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - @Test - public void testSetBasicStatsState_falseIsAbsent() { - Map params=new HashMap<>(); - StatsSetupConst.setBasicStatsState(params, String.valueOf(true)); - StatsSetupConst.setBasicStatsState(params, String.valueOf(false)); - assertNull(params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - // earlier implementation have quoted boolean values...so the new implementation should preserve this - @Test - public void testStatColumnEntriesCompat() { - Map params0=new HashMap<>(); - StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); 
- StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("Foo")); - - assertEquals("{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"Foo\":\"true\"}}",params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - @Test - public void testColumnEntries_orderIndependence() { - Map params0=new HashMap<>(); - StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); - StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("Foo","Bar")); - Map params1=new HashMap<>(); - StatsSetupConst.setColumnStatsState(params1, Lists.newArrayList("Bar","Foo")); - StatsSetupConst.setBasicStatsState(params1, String.valueOf(true)); - - assertEquals(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE),params1.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - @Test - public void testColumnEntries_orderIndependence2() { - Map params0=new HashMap<>(); - // in case jackson is able to deserialize...it may use a different implementation for the map - which may not preserve order - StatsSetupConst.setBasicStatsState(params0, String.valueOf(true)); - StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("year")); - StatsSetupConst.setColumnStatsState(params0, Lists.newArrayList("year","month")); - Map params1=new HashMap<>(); - StatsSetupConst.setColumnStatsState(params1, Lists.newArrayList("month","year")); - StatsSetupConst.setBasicStatsState(params1, String.valueOf(true)); - - System.out.println(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - assertEquals(params0.get(StatsSetupConst.COLUMN_STATS_ACCURATE),params1.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } - - // FIXME: current objective is to keep the previous outputs...but this is possibly bad.. 
- @Test - public void testColumnEntries_areKept_whenBasicIsAbsent() { - Map params=new HashMap<>(); - StatsSetupConst.setBasicStatsState(params, String.valueOf(false)); - StatsSetupConst.setColumnStatsState(params, Lists.newArrayList("Foo")); - assertEquals("{\"COLUMN_STATS\":{\"Foo\":\"true\"}}",params.get(StatsSetupConst.COLUMN_STATS_ACCURATE)); - } -} diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 09c2509b3d..be61401181 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -1260,9 +1260,31 @@ public void cleanWriteNotificationEvents(int olderThan) { } @Override - public Map> getPartitionColsWithStats(String catName, - String dbName, String tableName) throws MetaException, + public Map> getPartitionColsWithAccurateStats(String catName, + String dbName, String tableName, String validWriteId, boolean isAccurate) throws MetaException, NoSuchObjectException { return null; } + + @Override + public Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, + String validWriteIds) throws MetaException { + return null; + } + + @Override + public Partition alterPartitionBasicStats(String catName, String dbname, + String name, List partVals, Map stats, + boolean isAccurate, long writeId, String validWriteIds) + throws MetaException { + return null; + } + + @Override + public Map invalidateAllColumnStatistics(String catName, + String dbName, String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException { + return null; + } } diff --git 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 3aebaf3419..be179bd7eb 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -1235,8 +1235,8 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { } @Override - public Map> getPartitionColsWithStats(String catName, - String dbName, String tableName) throws MetaException, + public Map> getPartitionColsWithAccurateStats(String catName, + String dbName, String tableName, String validWriteId, boolean isAccurate) throws MetaException, NoSuchObjectException { return null; } @@ -1249,4 +1249,26 @@ public void cleanWriteNotificationEvents(int olderThan) { public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { return null; } + + @Override + public Table alterTableBasicStats(String catName, String dbname, String name, + Map stats, boolean isAccurate, long writeId, + String validWriteIds) throws MetaException { + return null; + } + + @Override + public Partition alterPartitionBasicStats(String catName, String dbname, + String name, List partVals, Map stats, + boolean isAccurate, long writeId, String validWriteIds) + throws MetaException { + return null; + } + + @Override + public Map invalidateAllColumnStatistics(String catName, + String dbName, String tblName, List partNames, long writeId) + throws MetaException, NoSuchObjectException { + return null; + } } diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 34055d2d4d..8a41afdce7 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -3528,4 +3528,25 @@ public void truncateTable(String dbName, String tableName, throws TException { throw new UnsupportedOperationException(); } + + @Override + public void alterTableBasicStats(String catName, String dbName, + String tblName, boolean isValid, Map basicStats, + long writeId, String validWriteIds) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterPartitionBasicStats(String catName, String dbName, + String tblName, String partName, boolean isValid, + Map basicStats, long writeId, String validWriteIds) + throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void invalidateAllColumnStats(String catName, String dbName, + String tableName, String partName, long writeId) { + throw new UnsupportedOperationException(); + } } diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java index d5ae5d1c0d..36e7711965 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreUtils.java @@ -42,12 +42,11 @@ import java.util.List; import java.util.Map; -import static org.apache.hadoop.hive.common.StatsSetupConst.COLUMN_STATS_ACCURATE; import static org.apache.hadoop.hive.common.StatsSetupConst.NUM_FILES; import static 
org.apache.hadoop.hive.common.StatsSetupConst.NUM_ERASURE_CODED_FILES; import static org.apache.hadoop.hive.common.StatsSetupConst.STATS_GENERATED; import static org.apache.hadoop.hive.common.StatsSetupConst.TOTAL_SIZE; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsSlow; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableFsStatsSlow; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -158,7 +157,7 @@ public void testUpdateTableStatsSlow_statsUpdated() throws TException { TOTAL_SIZE, String.valueOf(2 * fileLength), NUM_ERASURE_CODED_FILES, "1" ); - updateTableStatsSlow(db, tbl, wh, false, false, null); + updateTableFsStatsSlow(db, tbl, wh, false, false, null); assertThat(tbl.getParameters(), is(expected)); // Verify that when stats are already present and forceRecompute is specified they are recomputed @@ -170,39 +169,8 @@ public void testUpdateTableStatsSlow_statsUpdated() throws TException { .addTableParam(TOTAL_SIZE, "0") .build(null); when(wh.getFileStatusesForUnpartitionedTable(db, tbl1)).thenReturn(fileStatus); - updateTableStatsSlow(db, tbl1, wh, false, true, null); + updateTableFsStatsSlow(db, tbl1, wh, false, true, null); assertThat(tbl1.getParameters(), is(expected)); - - // Verify that COLUMN_STATS_ACCURATE is removed from params - Table tbl2 = new TableBuilder() - .setDbName(DB_NAME) - .setTableName(TABLE_NAME) - .addCol("id", "int") - .addTableParam(COLUMN_STATS_ACCURATE, "true") - .build(null); - when(wh.getFileStatusesForUnpartitionedTable(db, tbl2)).thenReturn(fileStatus); - updateTableStatsSlow(db, tbl2, wh, false, true, null); - assertThat(tbl2.getParameters(), is(expected)); - - EnvironmentContext context = new EnvironmentContext(ImmutableMap.of(STATS_GENERATED, - StatsSetupConst.TASK)); - - // Verify that if environment context has STATS_GENERATED set to task, - // COLUMN_STATS_ACCURATE in params is 
set to correct value - Table tbl3 = new TableBuilder() - .setDbName(DB_NAME) - .setTableName(TABLE_NAME) - .addCol("id", "int") - .addTableParam(COLUMN_STATS_ACCURATE, "foo") // The value doesn't matter - .build(null); - when(wh.getFileStatusesForUnpartitionedTable(db, tbl3)).thenReturn(fileStatus); - updateTableStatsSlow(db, tbl3, wh, false, true, context); - - Map expected1 = ImmutableMap.of(NUM_FILES, "2", - TOTAL_SIZE, String.valueOf(2 * fileLength), - NUM_ERASURE_CODED_FILES, "1", - COLUMN_STATS_ACCURATE, "{\"BASIC_STATS\":\"true\"}"); - assertThat(tbl3.getParameters(), is(expected1)); } /** @@ -224,10 +192,10 @@ public void testUpdateTableStatsSlow_removesDoNotUpdateStats() throws TException .addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "false") .build(null); Warehouse wh = mock(Warehouse.class); - updateTableStatsSlow(db, tbl, wh, false, true, null); + updateTableFsStatsSlow(db, tbl, wh, false, true, null); assertThat(tbl.getParameters(), is(Collections.emptyMap())); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl); - updateTableStatsSlow(db, tbl1, wh, true, false, null); + updateTableFsStatsSlow(db, tbl1, wh, true, false, null); assertThat(tbl.getParameters(), is(Collections.emptyMap())); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl1); } @@ -253,7 +221,7 @@ public void testUpdateTableStatsSlow_doesNotUpdateStats() throws TException { .build(null); Warehouse wh = mock(Warehouse.class); // newDir(true) => stats not updated - updateTableStatsSlow(db, tbl, wh, true, false, null); + updateTableFsStatsSlow(db, tbl, wh, true, false, null); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl); // partitioned table => stats not updated @@ -263,7 +231,7 @@ public void testUpdateTableStatsSlow_doesNotUpdateStats() throws TException { .addCol("id", "int") .setPartCols(cols) .build(null); - updateTableStatsSlow(db, tbl1, wh, false, false, null); + updateTableFsStatsSlow(db, tbl1, wh, false, false, null); verify(wh, 
never()).getFileStatusesForUnpartitionedTable(db, tbl1); // Already contains stats => stats not updated when forceRecompute isn't set @@ -273,7 +241,7 @@ public void testUpdateTableStatsSlow_doesNotUpdateStats() throws TException { .addCol("id", "int") .setTableParams(paramsWithStats) .build(null); - updateTableStatsSlow(db, tbl2, wh, false, false, null); + updateTableFsStatsSlow(db, tbl2, wh, false, false, null); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl2); }