diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java index 57afbf8..fba2177 100644 --- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java @@ -158,6 +158,10 @@ public static String txnIdToString(long txnId) { return "txnid:" + txnId; } + public static String writeIdToString(long writeId) { + return "writeid:" + writeId; + } + public static String txnIdsToString(List txnIds) { return "Transactions requested to be aborted: " + txnIds.toString(); } @@ -166,7 +170,7 @@ private JavaUtils() { // prevent instantiation } - public static Long extractTxnId(Path file) { + public static Long extractWriteId(Path file) { String fileName = file.getName(); String[] parts = fileName.split("_", 4); // e.g. delta_0000001_0000001_0000 or base_0000022 if (parts.length < 2 || !(DELTA_PREFIX.equals(parts[0]) || BASE_PREFIX.equals(parts[0]))) { diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 99e8457..a312224 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1093,7 +1093,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false, "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"), HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", - "hive.txn.valid.txns,hive.script.operator.env.blacklist", + "hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist", "Comma separated list of keys from the configuration file not to convert to environment " + "variables when invoking the script operator"), 
HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false, diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java index 4ec10ad..924e233 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java @@ -73,8 +73,8 @@ private final AcidOutputFormat outf; private Object[] bucketFieldData; // Pre-allocated in constructor. Updated on each write. - private Long curBatchMinTxnId; - private Long curBatchMaxTxnId; + private Long curBatchMinWriteId; + private Long curBatchMaxWriteId; private static final class TableWriterPair { private final Table tbl; @@ -143,7 +143,7 @@ public TableWriterPair run() throws Exception { * used to tag error msgs to provied some breadcrumbs */ String getWatermark() { - return partitionPath + " txnIds[" + curBatchMinTxnId + "," + curBatchMaxTxnId + "]"; + return partitionPath + " writeIds[" + curBatchMinWriteId + "," + curBatchMaxWriteId + "]"; } // return the column numbers of the bucketed columns private List getBucketColIDs(List bucketCols, List cols) { @@ -207,15 +207,15 @@ public void clear() throws StreamingIOFailure { /** * Creates a new record updater for the new batch - * @param minTxnId smallest Txnid in the batch - * @param maxTxnID largest Txnid in the batch + * @param minWriteId smallest writeid in the batch + * @param maxWriteID largest writeid in the batch * @throws StreamingIOFailure if failed to create record updater */ @Override - public void newBatch(Long minTxnId, Long maxTxnID) + public void newBatch(Long minWriteId, Long maxWriteID) throws StreamingIOFailure, SerializationError { - curBatchMinTxnId = minTxnId; - curBatchMaxTxnId = maxTxnID; + curBatchMinWriteId = minWriteId; + curBatchMaxWriteId = maxWriteID; updaters = new 
ArrayList(totalBuckets); for (int bucket = 0; bucket < totalBuckets; bucket++) { updaters.add(bucket, null);//so that get(i) returns null rather than ArrayOutOfBounds @@ -265,7 +265,7 @@ public void closeBatch() throws StreamingIOFailure { return bucketFieldData; } - private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxTxnID) + private RecordUpdater createRecordUpdater(int bucketId, Long minWriteId, Long maxWriteID) throws IOException, SerializationError { try { // Initialize table properties from the table parameters. This is required because the table @@ -278,8 +278,8 @@ private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxT .inspector(getSerde().getObjectInspector()) .bucket(bucketId) .tableProperties(tblProperties) - .minimumTransactionId(minTxnId) - .maximumTransactionId(maxTxnID) + .minimumWriteId(minWriteId) + .maximumWriteId(maxWriteID) .statementId(-1) .finalDestination(partitionPath)); } catch (SerDeException e) { @@ -292,7 +292,7 @@ RecordUpdater getRecordUpdater(int bucketId) throws StreamingIOFailure, Serializ RecordUpdater recordUpdater = updaters.get(bucketId); if (recordUpdater == null) { try { - recordUpdater = createRecordUpdater(bucketId, curBatchMinTxnId, curBatchMaxTxnId); + recordUpdater = createRecordUpdater(bucketId, curBatchMinWriteId, curBatchMaxWriteId); } catch (IOException e) { String errMsg = "Failed creating RecordUpdater for " + getWatermark(); LOG.error(errMsg, e); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java index 0a5492c..999c37e 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java @@ -255,16 +255,16 @@ private static boolean areFieldsInColOrder(int[] fieldToColMapping) { } @Override - public 
void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws SerializationError, StreamingIOFailure { try { byte[] orderedFields = reorderFields(record); Object encodedRow = encode(orderedFields); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction (" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id (" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 8943423..2dfc66e 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; @@ -558,7 +559,7 @@ private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveCo private final IMetaStoreClient msClient; private final IMetaStoreClient heartbeaterMSClient; private final RecordWriter recordWriter; - private final List txnIds; + private final List txnToWriteIds; //volatile because heartbeat() may be in a "different" thread; updates of this are "piggybacking" private volatile int currentTxnIndex = -1; @@ -609,14 +610,19 @@ private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEn this.recordWriter = recordWriter; 
this.agentInfo = agentInfo; - txnIds = openTxnImpl(msClient, user, numTxns, ugi); + List txnIds = openTxnImpl(msClient, user, numTxns, ugi); + txnToWriteIds = allocateWriteIdsImpl(msClient, txnIds, ugi); + assert(txnToWriteIds.size() == numTxns); + txnStatus = new TxnState[numTxns]; for(int i = 0; i < txnStatus.length; i++) { + assert(txnToWriteIds.get(i).getTxnId() == txnIds.get(i)); txnStatus[i] = TxnState.OPEN;//Open matches Metastore state } - this.state = TxnState.INACTIVE; - recordWriter.newBatch(txnIds.get(0), txnIds.get(txnIds.size()-1)); + + // The Write Ids returned for the transaction batch is also sequential + recordWriter.newBatch(txnToWriteIds.get(0).getWriteId(), txnToWriteIds.get(numTxns-1).getWriteId()); success = true; } catch (TException e) { throw new TransactionBatchUnAvailable(endPt, e); @@ -642,9 +648,22 @@ public Object run() throws Exception { }) ; } + private List allocateWriteIdsImpl(final IMetaStoreClient msClient, final List txnIds, UserGroupInformation ugi) + throws IOException, TException, InterruptedException { + if(ugi==null) { + return msClient.allocateTableWriteIdsBatch(txnIds, endPt.database, endPt.table); + } + return (List) ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + return msClient.allocateTableWriteIdsBatch(txnIds, endPt.database, endPt.table); + } + }) ; + } + @Override public String toString() { - if (txnIds==null || txnIds.isEmpty()) { + if (txnToWriteIds==null || txnToWriteIds.isEmpty()) { return "{}"; } StringBuilder sb = new StringBuilder(" TxnStatus["); @@ -653,7 +672,8 @@ public String toString() { sb.append(state == null ? "N" : state); } sb.append("] LastUsed ").append(JavaUtils.txnIdToString(lastTxnUsed)); - return "TxnIds=[" + txnIds.get(0) + "..." + txnIds.get(txnIds.size()-1) + return "TxnIds=[" + txnToWriteIds.get(0).getTxnId() + "..." 
+ + txnToWriteIds.get(txnToWriteIds.size()-1).getTxnId() + "] on endPoint = " + endPt + "; " + sb; } @@ -687,7 +707,7 @@ public Void run() throws TransactionError { private void beginNextTransactionImpl() throws TransactionError { state = TxnState.INACTIVE;//clear state from previous txn - if ( currentTxnIndex + 1 >= txnIds.size() ) + if ( currentTxnIndex + 1 >= txnToWriteIds.size() ) throw new InvalidTrasactionState("No more transactions available in" + " current batch for end point : " + endPt); ++currentTxnIndex; @@ -711,7 +731,19 @@ private void beginNextTransactionImpl() throws TransactionError { @Override public Long getCurrentTxnId() { if(currentTxnIndex >= 0) { - return txnIds.get(currentTxnIndex); + return txnToWriteIds.get(currentTxnIndex).getTxnId(); + } + return -1L; + } + + /** + * Get Id of currently open transaction + * @return -1 if there is no open TX + */ + @Override + public Long getCurrentWriteId() { + if(currentTxnIndex >= 0) { + return txnToWriteIds.get(currentTxnIndex).getWriteId(); } return -1L; } @@ -733,9 +765,9 @@ public TxnState getCurrentTransactionState() { @Override public int remainingTransactions() { if (currentTxnIndex>=0) { - return txnIds.size() - currentTxnIndex -1; + return txnToWriteIds.size() - currentTxnIndex -1; } - return txnIds.size(); + return txnToWriteIds.size(); } @@ -830,7 +862,7 @@ public Void run() throws StreamingException { private void writeImpl(Collection records) throws StreamingException { for (byte[] record : records) { - recordWriter.write(getCurrentTxnId(), record); + recordWriter.write(getCurrentWriteId(), record); } } @@ -875,7 +907,7 @@ public Void run() throws StreamingException { private void commitImpl() throws TransactionError, StreamingException { try { recordWriter.flush(); - msClient.commitTxn(txnIds.get(currentTxnIndex)); + msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); state = TxnState.COMMITTED; txnStatus[currentTxnIndex] = TxnState.COMMITTED; } catch (NoSuchTxnException e) 
{ @@ -938,8 +970,8 @@ private void abortImpl(boolean abortAllRemaining) throws TransactionError, Strea int minOpenTxnIndex = Math.max(currentTxnIndex + (state == TxnState.ABORTED || state == TxnState.COMMITTED ? 1 : 0), 0); for(currentTxnIndex = minOpenTxnIndex; - currentTxnIndex < txnIds.size(); currentTxnIndex++) { - msClient.rollbackTxn(txnIds.get(currentTxnIndex)); + currentTxnIndex < txnToWriteIds.size(); currentTxnIndex++) { + msClient.rollbackTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); txnStatus[currentTxnIndex] = TxnState.ABORTED; } currentTxnIndex--;//since the loop left it == txnId.size() @@ -966,15 +998,15 @@ public void heartbeat() throws StreamingException, HeartBeatFailure { if(isClosed) { return; } - if(state != TxnState.OPEN && currentTxnIndex >= txnIds.size() - 1) { + if(state != TxnState.OPEN && currentTxnIndex >= txnToWriteIds.size() - 1) { //here means last txn in the batch is resolved but the close() hasn't been called yet so //there is nothing to heartbeat return; } //if here after commit()/abort() but before next beginNextTransaction(), currentTxnIndex still //points at the last txn which we don't want to heartbeat - Long first = txnIds.get(state == TxnState.OPEN ? currentTxnIndex : currentTxnIndex + 1); - Long last = txnIds.get(txnIds.size()-1); + Long first = txnToWriteIds.get(state == TxnState.OPEN ? 
currentTxnIndex : currentTxnIndex + 1).getTxnId(); + Long last = txnToWriteIds.get(txnToWriteIds.size()-1).getTxnId(); try { HeartbeatTxnRangeResponse resp = heartbeaterMSClient.heartbeatTxnRange(first, last); if (!resp.getAborted().isEmpty() || !resp.getNosuch().isEmpty()) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java index cddb8de..53352fe 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java @@ -23,10 +23,10 @@ /** Writes using a hive RecordUpdater * - * @param transactionId the ID of the Txn in which the write occurs + * @param writeId the write ID of the table mapping to Txn in which the write occurs * @param record the record to be written */ - public void write(long transactionId, byte[] record) throws StreamingException; + public void write(long writeId, byte[] record) throws StreamingException; /** Flush records from buffer. Invoked by TransactionBatch.commit() */ public void flush() throws StreamingException; @@ -36,7 +36,7 @@ /** Acquire a new RecordUpdater. Invoked when * StreamingConnection.fetchTransactionBatch() is called */ - public void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException; + public void newBatch(Long minWriteId, Long maxWriteID) throws StreamingException; /** Close the RecordUpdater. 
Invoked by TransactionBatch.close() */ public void closeBatch() throws StreamingException; diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java index 357c537..4d92c55 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java @@ -117,15 +117,15 @@ protected HCatRecordObjectInspector getRecordObjectInspector() { @Override - public void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws StreamingIOFailure, SerializationError { try { Object encodedRow = encode(record); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction(" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id(" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java index 58db252..ae25662 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java @@ -124,15 +124,15 @@ protected StructObjectInspector getRecordObjectInspector() { @Override - public void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws StreamingIOFailure, SerializationError { try { Object encodedRow = encode(record); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, 
encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction(" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id(" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java index e5ad475..c8ece3f 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java @@ -58,6 +58,13 @@ public String toString() { */ public Long getCurrentTxnId(); + + /** + * Get write Id mapping to currently open transaction + * @return write id + */ + public Long getCurrentWriteId(); + /** * get state of current transaction */ diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java index 5b371e3..9fc9788 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java @@ -34,7 +34,7 @@ private final String tableName; private final boolean createPartitions; private final TableType tableType; - private long transactionId; + private long writeId; private Table table; @@ -48,10 +48,10 @@ /** * Returns {@code 0} until such a time that a {@link Transaction} has been acquired (when * {@link MutatorClient#newTransaction()} exits), at which point this will return the - * {@link Transaction#getTransactionId() transaction id}. + * transaction id. 
*/ - public long getTransactionId() { - return transactionId; + public long getWriteId() { + return writeId; } public String getDatabaseName() { @@ -105,8 +105,8 @@ public Table getTable() { return table; } - void setTransactionId(long transactionId) { - this.transactionId = transactionId; + void setWriteId(long writeId) { + this.writeId = writeId; } void setTable(Table table) { @@ -123,7 +123,7 @@ void setTable(Table table) { public String toString() { return "AcidTable [databaseName=" + databaseName + ", tableName=" + tableName + ", createPartitions=" + createPartitions + ", tableType=" + tableType + ", outputFormatName=" + getOutputFormatName() - + ", totalBuckets=" + getTotalBuckets() + ", transactionId=" + transactionId + "]"; + + ", totalBuckets=" + getTotalBuckets() + ", writeId=" + writeId + "]"; } } \ No newline at end of file diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java index 32db5e3..bc1f6fa 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java @@ -54,10 +54,10 @@ public static String encode(AcidTable table) throws IOException { data.writeUTF(table.getDatabaseName()); data.writeUTF(table.getTableName()); data.writeBoolean(table.createPartitions()); - if (table.getTransactionId() <= 0) { - LOG.warn("Transaction ID <= 0. The recipient is probably expecting a transaction ID."); + if (table.getWriteId() <= 0) { + LOG.warn("Write ID <= 0. 
The recipient is probably expecting a table write ID."); } - data.writeLong(table.getTransactionId()); + data.writeLong(table.getWriteId()); data.writeByte(table.getTableType().getId()); Table metaTable = table.getTable(); @@ -96,7 +96,7 @@ public static AcidTable decode(String encoded) throws IOException { int thriftLength = in.readInt(); table = new AcidTable(databaseName, tableName, createPartitions, tableType); - table.setTransactionId(transactionId); + table.setWriteId(transactionId); Table metaTable = null; if (thriftLength > 0) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java index 645274e..6fca66b 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hive.hcatalog.streaming.mutate.client.lock.Lock; import org.apache.hive.hcatalog.streaming.mutate.client.lock.LockFailureListener; import org.apache.thrift.TException; @@ -94,8 +95,19 @@ public Transaction newTransaction() throws TransactionException { throw new TransactionException("Not connected - cannot create transaction."); } Transaction transaction = new Transaction(metaStoreClient, lockOptions); + long txnId = transaction.getTransactionId(); for (AcidTable table : tables) { - table.setTransactionId(transaction.getTransactionId()); + try { + table.setWriteId(metaStoreClient.allocateTableWriteId(txnId, + table.getDatabaseName(), table.getTableName())); + } catch (TException ex) { + try { + metaStoreClient.rollbackTxn(txnId); + } catch 
(TException e) { + LOG.warn("Operation failed and rollback transaction {} failed due to {}", txnId, e.getMessage()); + } + throw new TransactionException("Unable to allocate table write ID", ex); + } } LOG.debug("Created transaction {}", transaction); return transaction; diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java index ae23153..5e804d7 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java @@ -98,11 +98,11 @@ } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. - * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. @@ -120,11 +120,11 @@ public void insert(List partitionValues, Object record) throws WorkerExc } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. 
- * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. @@ -142,11 +142,11 @@ public void update(List partitionValues, Object record) throws WorkerExc } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. - * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. @@ -229,9 +229,9 @@ private void resetMutator(int newBucketId, List newPartitionValues, Path sequenceValidator.reset(); if (deleteDeltaIfExists) { // TODO: Should this be the concern of the mutator? 
- deleteDeltaIfExists(newPartitionPath, table.getTransactionId(), newBucketId); + deleteDeltaIfExists(newPartitionPath, table.getWriteId(), newBucketId); } - mutator = mutatorFactory.newMutator(outputFormat, table.getTransactionId(), newPartitionPath, newBucketId); + mutator = mutatorFactory.newMutator(outputFormat, table.getWriteId(), newPartitionPath, newBucketId); bucketId = newBucketId; partitionValues = newPartitionValues; partitionPath = newPartitionPath; @@ -282,12 +282,12 @@ private void validateRecordSequence(OperationType operationType, RecordIdentifie } /* A delta may be present from a previous failed task attempt. */ - private void deleteDeltaIfExists(Path partitionPath, long transactionId, int bucketId) throws IOException { + private void deleteDeltaIfExists(Path partitionPath, long writeId, int bucketId) throws IOException { Path deltaPath = AcidUtils.createFilename(partitionPath, new AcidOutputFormat.Options(configuration) .bucket(bucketId) - .minimumTransactionId(transactionId) - .maximumTransactionId(transactionId)); + .minimumWriteId(writeId) + .maximumWriteId(writeId)); FileSystem fileSystem = deltaPath.getFileSystem(configuration); if (fileSystem.exists(deltaPath)) { LOG.info("Deleting existing delta path: {}", deltaPath); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java index 22cd9b7..ce99ab6 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java @@ -24,7 +24,7 @@ public interface MutatorFactory { - Mutator newMutator(AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketId) throws IOException; + Mutator newMutator(AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketId) throws 
IOException; RecordInspector newRecordInspector(); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java index 05cf8b7..84c477f 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java @@ -31,7 +31,7 @@ /** Base {@link Mutator} implementation. Creates a suitable {@link RecordUpdater} and delegates mutation events. */ public class MutatorImpl implements Mutator { - private final long transactionId; + private final long writeId; private final Path partitionPath; private final int bucketProperty; private final Configuration configuration; @@ -44,11 +44,11 @@ * @throws IOException */ public MutatorImpl(Configuration configuration, int recordIdColumn, ObjectInspector objectInspector, - AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketProperty) throws IOException { + AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketProperty) throws IOException { this.configuration = configuration; this.recordIdColumn = recordIdColumn; this.objectInspector = objectInspector; - this.transactionId = transactionId; + this.writeId = writeId; this.partitionPath = partitionPath; this.bucketProperty = bucketProperty; @@ -57,17 +57,17 @@ public MutatorImpl(Configuration configuration, int recordIdColumn, ObjectInspec @Override public void insert(Object record) throws IOException { - updater.insert(transactionId, record); + updater.insert(writeId, record); } @Override public void update(Object record) throws IOException { - updater.update(transactionId, record); + updater.update(writeId, record); } @Override public void delete(Object record) throws IOException { - updater.delete(transactionId, record); + updater.delete(writeId, record); } /** @@ 
-89,7 +89,7 @@ public void close() throws IOException { @Override public String toString() { - return "ObjectInspectorMutator [transactionId=" + transactionId + ", partitionPath=" + partitionPath + return "ObjectInspectorMutator [writeId=" + writeId + ", partitionPath=" + partitionPath + ", bucketId=" + bucketProperty + "]"; } @@ -101,8 +101,8 @@ protected RecordUpdater createRecordUpdater(AcidOutputFormat outputFormat) new AcidOutputFormat.Options(configuration) .inspector(objectInspector) .bucket(bucketId) - .minimumTransactionId(transactionId) - .maximumTransactionId(transactionId) + .minimumWriteId(writeId) + .maximumWriteId(writeId) .recordIdColumn(recordIdColumn) .finalDestination(partitionPath) .statementId(-1)); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java index 5cd8081..97b5acc 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java @@ -29,22 +29,22 @@ private static final Logger LOG = LoggerFactory.getLogger(SequenceValidator.class); - private Long lastTxId; + private Long lastWriteId; private Long lastRowId; SequenceValidator() { } boolean isInSequence(RecordIdentifier recordIdentifier) { - if (lastTxId != null && recordIdentifier.getTransactionId() < lastTxId) { - LOG.debug("Non-sequential transaction ID. Expected >{}, recordIdentifier={}", lastTxId, recordIdentifier); + if (lastWriteId != null && recordIdentifier.getWriteId() < lastWriteId) { + LOG.debug("Non-sequential transaction ID. 
Expected >{}, recordIdentifier={}", lastWriteId, recordIdentifier); return false; - } else if (lastTxId != null && recordIdentifier.getTransactionId() == lastTxId && lastRowId != null + } else if (lastWriteId != null && recordIdentifier.getWriteId() == lastWriteId && lastRowId != null && recordIdentifier.getRowId() <= lastRowId) { LOG.debug("Non-sequential row ID. Expected >{}, recordIdentifier={}", lastRowId, recordIdentifier); return false; } - lastTxId = recordIdentifier.getTransactionId(); + lastWriteId = recordIdentifier.getWriteId(); lastRowId = recordIdentifier.getRowId(); return true; } @@ -53,14 +53,14 @@ boolean isInSequence(RecordIdentifier recordIdentifier) { * Validator must be reset for each new partition and or bucket. */ void reset() { - lastTxId = null; + lastWriteId = null; lastRowId = null; LOG.debug("reset"); } @Override public String toString() { - return "SequenceValidator [lastTxId=" + lastTxId + ", lastRowId=" + lastRowId + "]"; + return "SequenceValidator [lastWriteId=" + lastWriteId + ", lastRowId=" + lastRowId + "]"; } } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index 5e12614..c5b96cd 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -45,7 +45,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.Validator; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; @@ -375,16 +375,16 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("", 0, 
BucketCodec.determineVersion(536870912).decodeWriterId(536870912)); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000016_0000016_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(2), 
rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'"); queryTable(driver, "delete from default.streamingnobuckets where a='a1'"); @@ -399,14 +399,14 @@ public void testNoBuckets() throws Exception { runWorker(conf); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000022/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), 
rs.get(0).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005/bucket_00000")); } /** @@ -540,8 +540,8 @@ public void testTableValidation() throws Exception { */ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int buckets, int numExpectedFiles, String... records) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); - AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); + ValidWriteIdList writeIds = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); + AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); List current = dir.getCurrentDirectories(); @@ -553,8 +553,8 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int long min = Long.MAX_VALUE; long max = Long.MIN_VALUE; for (AcidUtils.ParsedDelta pd : current) { - if (pd.getMaxTransaction() > max) max = pd.getMaxTransaction(); - if (pd.getMinTransaction() < min) min = pd.getMinTransaction(); + if (pd.getMaxWriteId() > max) max = pd.getMaxWriteId(); + if (pd.getMinWriteId() < min) min = pd.getMinWriteId(); } Assert.assertEquals(minTxn, min); Assert.assertEquals(maxTxn, max); @@ -567,7 +567,7 @@ private void checkDataWritten(Path 
partitionPath, long minTxn, long maxTxn, int job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); InputSplit[] splits = inf.getSplits(job, buckets); Assert.assertEquals(numExpectedFiles, splits.length); org.apache.hadoop.mapred.RecordReader rr = @@ -587,7 +587,7 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int */ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int numExpectedFiles, String validationQuery, boolean vectorize, String... records) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); + ValidWriteIdList txns = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); @@ -600,8 +600,8 @@ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int long min = Long.MAX_VALUE; long max = Long.MIN_VALUE; for (AcidUtils.ParsedDelta pd : current) { - if (pd.getMaxTransaction() > max) max = pd.getMaxTransaction(); - if (pd.getMinTransaction() < min) min = pd.getMinTransaction(); + if (pd.getMaxWriteId() > max) max = pd.getMaxWriteId(); + if (pd.getMinWriteId() < min) min = pd.getMinWriteId(); } Assert.assertEquals(minTxn, min); Assert.assertEquals(maxTxn, max); @@ -625,8 +625,8 @@ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int } private void checkNothingWritten(Path partitionPath) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); - AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); + ValidWriteIdList writeIds = 
msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); + AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); List current = dir.getCurrentDirectories(); @@ -865,7 +865,7 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -877,11 +877,11 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -933,7 +933,7 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -945,11 +945,11 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, 
"{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -995,7 +995,7 @@ public void testTransactionBatchCommit_Json() throws Exception { txnBatch.write(rec1.getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -1122,7 +1122,7 @@ public void testTransactionBatchAbortAndCommit() throws Exception { txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 14, 23, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -1141,13 +1141,13 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); String validationQuery = "select id, msg from " + dbName + "." 
+ tblName + " order by id, msg"; - checkDataWritten2(partLoc, 15, 24, 1, validationQuery, false, "1\tHello streaming"); + checkDataWritten2(partLoc, 1, 10, 1, validationQuery, false, "1\tHello streaming"); txnBatch.beginNextTransaction(); txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 24, 1, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 10, 1, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming"); txnBatch.close(); @@ -1158,14 +1158,14 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("3,Hello streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 40, 2, validationQuery, false, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch.beginNextTransaction(); txnBatch.write("4,Welcome to streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 40, 2, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", "4\tWelcome to streaming - once again"); @@ -1202,14 +1202,14 @@ public void testInterleavedTransactionBatchCommits() throws Exception { txnBatch2.commit(); String validationQuery = "select id, msg from " + dbName + "." 
+ tblName + " order by id, msg"; - checkDataWritten2(partLoc, 24, 33, 1, + checkDataWritten2(partLoc, 11, 20, 1, validationQuery, true, "3\tHello streaming - once again"); txnBatch1.commit(); /*now both batches have committed (but not closed) so we for each primary file we expect a side file to exist and indicate the true length of primary file*/ FileSystem fs = partLoc.getFileSystem(conf); - AcidUtils.Directory dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidTxns()); + AcidUtils.Directory dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName))); for(AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) { for(FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) { Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath()); @@ -1222,7 +1222,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength == actualLength); } } - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.beginNextTransaction(); @@ -1234,7 +1234,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { //so each of 2 deltas has 1 bucket0 and 1 bucket0_flush_length. Furthermore, each bucket0 //has now received more data(logically - it's buffered) but it is not yet committed. 
//lets check that side files exist, etc - dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidTxns()); + dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName))); for(AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) { for(FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) { Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath()); @@ -1247,19 +1247,19 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength <= actualLength); } } - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.commit(); - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch2.commit(); - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", @@ -2268,8 +2268,8 @@ private FaultyWriter(RecordWriter delegate) { this.delegate = delegate; } @Override - public void write(long transactionId, byte[] record) throws StreamingException { - delegate.write(transactionId, record); + public void write(long writeId, byte[] record) throws StreamingException { + delegate.write(writeId, record); produceFault(); } @Override diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java index e057da7..c05ddcf 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java @@ -49,9 
+49,9 @@ public ReflectiveMutatorFactory(Configuration configuration, Class recordClas } @Override - public Mutator newMutator(AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketId) + public Mutator newMutator(AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketId) throws IOException { - return new MutatorImpl(configuration, recordIdColumn, objectInspector, outputFormat, transactionId, partitionPath, + return new MutatorImpl(configuration, recordIdColumn, objectInspector, outputFormat, writeId, partitionPath, bucketId); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java index c98d22b..e72b9fa 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java @@ -25,7 +25,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -71,7 +71,7 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t private List partition; private IMetaStoreClient metaStoreClient; private Directory dir; - private ValidTxnList txns; + private ValidWriteIdList writeIds; private List currentDeltas; private long min; private long max; @@ -83,9 +83,9 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t this.table = table; this.partition = partition; - txns = metaStoreClient.getValidTxns(); + writeIds = metaStoreClient.getValidWriteIds(AcidUtils.getFullTableName(table.getDbName(), table.getTableName())); partitionLocation = getPartitionLocation(); - 
dir = AcidUtils.getAcidState(partitionLocation, conf, txns); + dir = AcidUtils.getAcidState(partitionLocation, conf, writeIds); assertEquals(0, dir.getObsolete().size()); assertEquals(0, dir.getOriginalFiles().size()); @@ -95,8 +95,8 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t System.out.println("Files found: "); for (AcidUtils.ParsedDelta parsedDelta : currentDeltas) { System.out.println(parsedDelta.getPath().toString()); - max = Math.max(parsedDelta.getMaxTransaction(), max); - min = Math.min(parsedDelta.getMinTransaction(), min); + max = Math.max(parsedDelta.getMaxWriteId(), max); + min = Math.min(parsedDelta.getMinWriteId(), min); } } @@ -145,7 +145,7 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) { job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); InputSplit[] splits = inputFormat.getSplits(job, 1); assertEquals(numSplitsExpected, splits.length); @@ -160,7 +160,7 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) { while (recordReader.next(key, value)) { RecordIdentifier recordIdentifier = recordReader.getRecordIdentifier(); - Record record = new Record(new RecordIdentifier(recordIdentifier.getTransactionId(), + Record record = new Record(new RecordIdentifier(recordIdentifier.getWriteId(), recordIdentifier.getBucketProperty(), recordIdentifier.getRowId()), value.toString()); System.out.println(record); records.add(record); diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java index 7876e8d..1523a10 100644 --- 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java @@ -45,7 +45,7 @@ public void testSerializeDeserialize() throws Exception { AcidTable acidTable = new AcidTable("db_1", "table_1", true, TableType.SINK); acidTable.setTable(table); - acidTable.setTransactionId(42L); + acidTable.setWriteId(42L); String encoded = AcidTableSerializer.encode(acidTable); System.out.println(encoded); @@ -57,7 +57,7 @@ public void testSerializeDeserialize() throws Exception { assertThat(decoded.getOutputFormatName(), is("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")); assertThat(decoded.getTotalBuckets(), is(10)); assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1")); - assertThat(decoded.getTransactionId(), is(42L)); + assertThat(decoded.getWriteId(), is(42L)); assertThat(decoded.getTableType(), is(TableType.SINK)); assertThat(decoded.getTable(), is(table)); } @@ -75,7 +75,7 @@ public void testSerializeDeserializeNoTableNoTransaction() throws Exception { assertThat(decoded.getOutputFormatName(), is(nullValue())); assertThat(decoded.getTotalBuckets(), is(0)); assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1")); - assertThat(decoded.getTransactionId(), is(0L)); + assertThat(decoded.getWriteId(), is(0L)); assertThat(decoded.getTableType(), is(TableType.SINK)); assertThat(decoded.getTable(), is(nullValue())); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java index cfe3a96..91b90ed 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java @@ -48,6 +48,8 @@ public class TestMutatorClient { 
private static final long TRANSACTION_ID = 42L; + private static final long WRITE_ID1 = 78L; + private static final long WRITE_ID2 = 33L; private static final String TABLE_NAME_1 = "TABLE_1"; private static final String TABLE_NAME_2 = "TABLE_2"; private static final String DB_NAME = "DB_1"; @@ -89,6 +91,8 @@ public void configureMocks() throws Exception { when(mockParameters.get("transactional")).thenReturn(Boolean.TRUE.toString()); when(mockMetaStoreClient.openTxn(USER)).thenReturn(TRANSACTION_ID); + when(mockMetaStoreClient.allocateTableWriteId(TRANSACTION_ID, DB_NAME, TABLE_NAME_1)).thenReturn(WRITE_ID1); + when(mockMetaStoreClient.allocateTableWriteId(TRANSACTION_ID, DB_NAME, TABLE_NAME_2)).thenReturn(WRITE_ID2); client = new MutatorClient(mockMetaStoreClient, mockConfiguration, mockLockFailureListener, USER, Collections.singletonList(TABLE_1)); @@ -110,13 +114,13 @@ public void testCheckValidTableConnect() throws Exception { assertThat(outTables.get(0).getTableName(), is(TABLE_NAME_1)); assertThat(outTables.get(0).getTotalBuckets(), is(2)); assertThat(outTables.get(0).getOutputFormatName(), is(OrcOutputFormat.class.getName())); - assertThat(outTables.get(0).getTransactionId(), is(0L)); + assertThat(outTables.get(0).getWriteId(), is(0L)); assertThat(outTables.get(0).getTable(), is(mockTable1)); assertThat(outTables.get(1).getDatabaseName(), is(DB_NAME)); assertThat(outTables.get(1).getTableName(), is(TABLE_NAME_2)); assertThat(outTables.get(1).getTotalBuckets(), is(2)); assertThat(outTables.get(1).getOutputFormatName(), is(OrcOutputFormat.class.getName())); - assertThat(outTables.get(1).getTransactionId(), is(0L)); + assertThat(outTables.get(1).getWriteId(), is(0L)); assertThat(outTables.get(1).getTable(), is(mockTable2)); } @@ -179,8 +183,8 @@ public void testNewTransaction() throws Exception { assertThat(transaction.getTransactionId(), is(TRANSACTION_ID)); assertThat(transaction.getState(), is(TxnState.INACTIVE)); - 
assertThat(outTables.get(0).getTransactionId(), is(TRANSACTION_ID)); - assertThat(outTables.get(1).getTransactionId(), is(TRANSACTION_ID)); + assertThat(outTables.get(0).getWriteId(), is(WRITE_ID1)); + assertThat(outTables.get(1).getWriteId(), is(WRITE_ID2)); } @Test diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java index d897477..fab56b3 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java @@ -49,7 +49,7 @@ private static final List UNPARTITIONED = Collections. emptyList(); private static final List PARTITION_B = Arrays.asList("B"); private static final List PARTITION_A = Arrays.asList("A"); - private static final long TRANSACTION_ID = 2L; + private static final long WRITE_ID = 2L; private static final int BUCKET_ID = 0; private static final Path PATH_A = new Path("X"); private static final Path PATH_B = new Path("B"); @@ -84,7 +84,7 @@ public void createCoordinator() throws Exception { when(mockAcidTable.getOutputFormatName()).thenReturn(OrcOutputFormat.class.getName()); when(mockAcidTable.getTotalBuckets()).thenReturn(1); - when(mockAcidTable.getTransactionId()).thenReturn(TRANSACTION_ID); + when(mockAcidTable.getWriteId()).thenReturn(WRITE_ID); when(mockAcidTable.createPartitions()).thenReturn(true); when(mockMutatorFactory.newRecordInspector()).thenReturn(mockRecordInspector); when(mockMutatorFactory.newBucketIdResolver(anyInt())).thenReturn(mockBucketIdResolver); @@ -104,7 +104,7 @@ public void insert() throws Exception { coordinator.insert(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), 
eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).insert(RECORD); } @@ -115,7 +115,7 @@ public void multipleInserts() throws Exception { coordinator.insert(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator, times(3)).insert(RECORD); } @@ -129,8 +129,8 @@ public void insertPartitionChanges() throws Exception { verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_A); verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_B); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID)); verify(mockMutator, times(2)).insert(RECORD); } @@ -143,9 +143,9 @@ public void bucketChanges() throws Exception { coordinator.update(UNPARTITIONED, RECORD); coordinator.delete(UNPARTITIONED, RECORD); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutatorFactory) - .newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID + 1)); + .newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID + 1)); verify(mockMutator).update(RECORD); 
verify(mockMutator).delete(RECORD); } @@ -166,11 +166,11 @@ public void partitionThenBucketChanges() throws Exception { coordinator.update(PARTITION_B, RECORD); /* PbB1 */ verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_B); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); - verify(mockMutatorFactory, times(2)).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory, times(2)).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID)); verify(mockMutatorFactory) - .newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), eq(BUCKET_ID + 1)); + .newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID + 1)); verify(mockMutator, times(2)).update(RECORD); verify(mockMutator).delete(RECORD); verify(mockMutator).insert(RECORD); @@ -197,7 +197,7 @@ public void outOfSequence() throws Exception { coordinator.delete(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).update(RECORD); verify(mockMutator).delete(RECORD); } @@ -210,7 +210,7 @@ public void revisitGroup() throws Exception { coordinator.delete(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).update(RECORD); verify(mockMutator).delete(RECORD); } diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java index 2273e06..d2c89e5 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java @@ -49,7 +49,7 @@ private static final int RECORD_ID_COLUMN = 2; private static final int BUCKET_ID = 0; private static final Path PATH = new Path("X"); - private static final long TRANSACTION_ID = 1L; + private static final long WRITE_ID = 1L; @Mock private AcidOutputFormat mockOutputFormat; @@ -67,7 +67,7 @@ @Before public void injectMocks() throws IOException { when(mockOutputFormat.getRecordUpdater(eq(PATH), any(Options.class))).thenReturn(mockRecordUpdater); - mutator = new MutatorImpl(configuration, RECORD_ID_COLUMN, mockObjectInspector, mockOutputFormat, TRANSACTION_ID, + mutator = new MutatorImpl(configuration, RECORD_ID_COLUMN, mockObjectInspector, mockOutputFormat, WRITE_ID, PATH, BUCKET_ID); } @@ -79,26 +79,26 @@ public void testCreatesRecordReader() throws IOException { assertThat(options.getConfiguration(), is((Configuration) configuration)); assertThat(options.getInspector(), is(mockObjectInspector)); assertThat(options.getRecordIdColumn(), is(RECORD_ID_COLUMN)); - assertThat(options.getMinimumTransactionId(), is(TRANSACTION_ID)); - assertThat(options.getMaximumTransactionId(), is(TRANSACTION_ID)); + assertThat(options.getMinimumWriteId(), is(WRITE_ID)); + assertThat(options.getMaximumWriteId(), is(WRITE_ID)); } @Test public void testInsertDelegates() throws IOException { mutator.insert(RECORD); - verify(mockRecordUpdater).insert(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).insert(WRITE_ID, RECORD); } @Test public void testUpdateDelegates() throws IOException { mutator.update(RECORD); - 
verify(mockRecordUpdater).update(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).update(WRITE_ID, RECORD); } @Test public void testDeleteDelegates() throws IOException { mutator.delete(RECORD); - verify(mockRecordUpdater).delete(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).delete(WRITE_ID, RECORD); } @Test diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index 7967a24..53ae2c0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -244,7 +244,7 @@ public void testNonStandardConversion01() throws Exception { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/000000_0"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000021_0000021_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000001_0000001_0000/bucket_00000"} }; Assert.assertEquals("Unexpected row count after update", expected2.length, rs.size()); //verify data and layout @@ -256,7 +256,7 @@ public void testNonStandardConversion01() throws Exception { FileSystem fs = FileSystem.get(hiveConf); FileStatus[] status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDNONBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta = {"delete_delta_0000021_0000021_0000", "delete_delta_0000022_0000022_0000"}; + String[] expectedDelDelta = {"delete_delta_0000001_0000001_0000", "delete_delta_0000002_0000002_0000"}; 
for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta.length; i++) { if(expectedDelDelta[i] != null && stat.getPath().toString().endsWith(expectedDelDelta[i])) { @@ -285,7 +285,7 @@ public void testNonStandardConversion01() throws Exception { //check we have right delete delta files after minor compaction status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDNONBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta2 = {"delete_delta_0000021_0000021_0000", "delete_delta_0000022_0000022_0000", "delete_delta_0000021_0000022"}; + String[] expectedDelDelta2 = {"delete_delta_0000001_0000001_0000", "delete_delta_0000002_0000002_0000", "delete_delta_0000001_0000002"}; for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta2.length; i++) { if(expectedDelDelta2[i] != null && stat.getPath().toString().endsWith(expectedDelDelta2[i])) { @@ -309,7 +309,7 @@ public void testNonStandardConversion01() throws Exception { for(int i = 0; i < expected2.length; i++) { Assert.assertTrue("Actual line " + i + " bc: " + rs.get(i), rs.get(i).startsWith(expected2[i][0])); //everything is now in base/ - Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000022/bucket_00000")); + Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000002/bucket_00000")); } } /** @@ -453,12 +453,12 @@ public void testCtasTezUnion() throws Exception { /* * Expected result 0th entry is the RecordIdentifier + data. 
1st entry file before compact*/ String expected[][] = { - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":2}\t5\t6", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":2}\t5\t6", "/delta_0000018_0000018_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":2}\t5\t6", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":2}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"}, }; Assert.assertEquals("Unexpected row count after ctas", expected.length, rs.size()); //verify data and layout @@ -475,10 +475,10 @@ public void testCtasTezUnion() throws Exception { LOG.warn(s); } String[][] expected2 = { - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":0}\t9\t10", 
"/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000020_0000020_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "/delta_0000002_0000002_0000/bucket_00000"} }; Assert.assertEquals("Unexpected row count after update", expected2.length, rs.size()); //verify data and layout @@ -490,7 +490,7 @@ public void testCtasTezUnion() throws Exception { FileSystem fs = FileSystem.get(hiveConf); FileStatus[] status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.ACIDNOBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta = {"delete_delta_0000020_0000020_0000", "delete_delta_0000021_0000021_0000"}; + String[] expectedDelDelta = {"delete_delta_0000002_0000002_0000", "delete_delta_0000003_0000003_0000"}; for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta.length; i++) { if(expectedDelDelta[i] != null && stat.getPath().toString().endsWith(expectedDelDelta[i])) { @@ -519,7 +519,7 @@ public void testCtasTezUnion() throws Exception { //check we have right delete delta files after minor compaction status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.ACIDNOBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta2 = { "delete_delta_0000020_0000020_0000", "delete_delta_0000021_0000021_0000", "delete_delta_0000018_0000021"}; + String[] expectedDelDelta2 = { "delete_delta_0000002_0000002_0000", "delete_delta_0000003_0000003_0000", "delete_delta_0000001_0000003"}; for(FileStatus stat : status) { for(int i = 0; i < 
expectedDelDelta2.length; i++) { if(expectedDelDelta2[i] != null && stat.getPath().toString().endsWith(expectedDelDelta2[i])) { @@ -543,7 +543,7 @@ public void testCtasTezUnion() throws Exception { for(int i = 0; i < expected2.length; i++) { Assert.assertTrue("Actual line " + i + " bc: " + rs.get(i), rs.get(i).startsWith(expected2[i][0])); //everything is now in base/ - Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000021/bucket_00000")); + Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000003/bucket_00000")); } } /** @@ -638,17 +638,17 @@ public void testAcidInsertWithRemoveUnion() throws Exception { ├── HIVE_UNION_SUBDIR_1 │   └── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000019_0000019_0001 + │   └── delta_0000001_0000001_0001 │   └── bucket_00000 ├── HIVE_UNION_SUBDIR_2 │   └── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000019_0000019_0002 + │   └── delta_0000001_0000001_0002 │   └── bucket_00000 └── HIVE_UNION_SUBDIR_3 └── 000000_0 ├── _orc_acid_version - └── delta_0000019_0000019_0003 + └── delta_0000001_0000001_0003 └── bucket_00000 10 directories, 6 files */ @@ -660,11 +660,11 @@ public void testAcidInsertWithRemoveUnion() throws Exception { } String[][] expected2 = { - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000019_0000019_0001/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000019_0000019_0001/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000019_0000019_0002/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000019_0000019_0002/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000019_0000019_0003/bucket_00000"} + 
{"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000001_0000001_0003/bucket_00000"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { @@ -688,11 +688,11 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception { └── -ext-10000 ├── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000021_0000021_0000 + │   └── delta_0000001_0000001_0000 │   └── bucket_00000 └── 000001_0 ├── _orc_acid_version - └── delta_0000021_0000021_0000 + └── delta_0000001_0000001_0000 └── bucket_00001 5 directories, 4 files @@ -705,11 +705,11 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception { LOG.warn(s); } String[][] expected2 = { - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":2}\t5\t6", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":1}\t9\t10", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"} + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t2", 
"warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":2}\t5\t6", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t9\t10", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 1305902..424f100 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -662,17 +662,17 @@ public void minorCompactWhileStreaming() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if (names[i].equals("delta_0000001_0000004")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"}; + String[] 
expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); @@ -722,11 +722,11 @@ public void majorCompactWhileStreaming() throws Exception { FileStatus[] stat = fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter); if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals(name, "base_0000006"); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + Assert.assertEquals(name, "base_0000004"); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -782,17 +782,17 @@ public void minorCompactAfterAbort() throws Exception { Path resultDelta = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if (names[i].equals("delta_0000001_0000004")) { resultDelta = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + 
Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -848,13 +848,13 @@ public void majorCompactAfterAbort() throws Exception { Assert.fail("majorCompactAfterAbort FileStatus[] stat " + Arrays.toString(stat)); } if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - if (!name.equals("base_0000006")) { - Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000006"); + if (!name.equals("base_0000004")) { + Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004"); } - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -903,11 +903,11 @@ public void majorCompactWhileStreamingForSplitUpdate() throws Exception { FileStatus[] stat = fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter); if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals(name, "base_0000006"); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 2); + Assert.assertEquals(name, "base_0000004"); + 
checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 2); } finally { connection.close(); } @@ -961,16 +961,16 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000003_0000005")) { + if (deltas[i].equals("delta_0000001_0000003")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000005", "delta_0000004_0000004_0000"}; + String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003", "delta_0000002_0000002_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } - checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L, 1); // Verify that we have got correct set of delete_deltas. 
FileStatus[] deleteDeltaStat = @@ -979,16 +979,16 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000005")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000003")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000005", "delete_delta_0000005_0000005_0000"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003", "delete_delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } - checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 4L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 2L, 1); } @Test @@ -1038,16 +1038,16 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000003_0000004")) { + if (deltas[i].equals("delta_0000001_0000002")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000004", "delta_0000004_0000004_0000"}; + String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002", "delta_0000002_0000002_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } - 
checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L, 1); // Verify that we have got correct set of delete_deltas. FileStatus[] deleteDeltaStat = @@ -1056,12 +1056,12 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000004")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000002")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000004"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000002"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } @@ -1115,17 +1115,17 @@ public void minorCompactWhileStreamingWithSplitUpdate() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if (names[i].equals("delta_0000001_0000004")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"}; + String[] expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, 
columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); // Verify that we have got correct set of delete_deltas also FileStatus[] deleteDeltaStat = @@ -1134,12 +1134,12 @@ public void minorCompactWhileStreamingWithSplitUpdate() throws Exception { Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000006")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000004")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000006"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000004"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } @@ -1331,14 +1331,19 @@ private void writeBatch(StreamingConnection connection, DelimitedInputWriter wri private void checkExpectedTxnsPresent(Path base, Path[] deltas, String columnNamesProperty, String columnTypesProperty, int bucket, long min, long max, int numBuckets) throws IOException { - ValidTxnList txnList = new ValidTxnList() { + ValidWriteIdList writeIdList = new ValidWriteIdList() { @Override - public boolean isTxnValid(long txnid) { + public String getTableName() { + return "AcidTable"; + } + + @Override + public boolean isWriteIdValid(long writeid) { return true; } @Override - public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) { + public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } @@ -1353,7 +1358,7 @@ public void readFromString(String src) { } @Override - public Long getMinOpenTxn() { return null; } + public Long getMinOpenWriteId() { return null; } @Override 
public long getHighWatermark() { @@ -1361,7 +1366,7 @@ public long getHighWatermark() { } @Override - public long[] getInvalidTransactions() { + public long[] getInvalidWriteIds() { return new long[0]; } @Override @@ -1370,12 +1375,12 @@ public boolean isValidBase(long txnid) { } @Override - public boolean isTxnAborted(long txnid) { + public boolean isWriteIdAborted(long txnid) { return true; } @Override - public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) { + public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } }; @@ -1388,18 +1393,18 @@ public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) { conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets)); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); AcidInputFormat.RawReader reader = - aif.getRawReader(conf, true, bucket, txnList, base, deltas); + aif.getRawReader(conf, true, bucket, writeIdList, base, deltas); RecordIdentifier identifier = reader.createKey(); OrcStruct value = reader.createValue(); long currentTxn = min; boolean seenCurrentTxn = false; while (reader.next(identifier, value)) { if (!seenCurrentTxn) { - Assert.assertEquals(currentTxn, identifier.getTransactionId()); + Assert.assertEquals(currentTxn, identifier.getWriteId()); seenCurrentTxn = true; } - if (currentTxn != identifier.getTransactionId()) { - Assert.assertEquals(currentTxn + 1, identifier.getTransactionId()); + if (currentTxn != identifier.getWriteId()) { + Assert.assertEquals(currentTxn + 1, identifier.getWriteId()); currentTxn++; } } diff --git a/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql b/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql new file mode 100644 index 0000000..3e40dfe --- /dev/null +++ b/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql @@ -0,0 +1,16 @@ +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) 
NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +RENAME COLUMN COMPACTION_QUEUE.CQ_HIGHEST_TXN_ID TO CQ_HIGHEST_WRITE_ID; + +RENAME COLUMN COMPLETED_COMPACTIONS.CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID; diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql index 85d593f..ce34c40 100644 --- a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql @@ -53,6 +53,19 @@ CREATE TABLE NEXT_TXN_ID ( ); INSERT INTO NEXT_TXN_ID VALUES(1); +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, @@ -91,7 +104,7 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_WORKER_ID varchar(128), CQ_START bigint, CQ_RUN_AS varchar(128), - CQ_HIGHEST_TXN_ID bigint, + CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varchar(2048) for bit data, CQ_HADOOP_JOB_ID varchar(32) ); @@ -113,7 +126,7 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_START bigint, CC_END bigint, CC_RUN_AS varchar(128), - CC_HIGHEST_TXN_ID bigint, + CC_HIGHEST_WRITE_ID bigint, CC_META_INFO varchar(2048) for bit data, CC_HADOOP_JOB_ID varchar(32) ); diff --git a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql index 3a11881..55b89e7 100644 --- a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql +++ b/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql @@ -7,5 +7,6 @@ RUN '045-HIVE-16886.derby.sql'; RUN 
'046-HIVE-17566.derby.sql'; RUN '048-HIVE-14498.derby.sql'; RUN '049-HIVE-18489.derby.sql'; +RUN '050-HIVE-18192.derby.sql'; UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 74595b0..5202a17 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -42,8 +42,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.metrics.common.Metrics; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -59,6 +60,7 @@ import org.apache.hadoop.hive.ql.exec.DagUtils; import org.apache.hadoop.hive.ql.exec.ExplainTask; import org.apache.hadoop.hive.ql.exec.FetchTask; +import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -74,6 +76,7 @@ import org.apache.hadoop.hive.ql.hooks.HooksLoader; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; @@ -1210,30 +1213,76 @@ public FetchTask getFetchTask() { return fetchTask; } - // Write the current set of valid transactions into the conf file so that it can be read by - // 
the input format. + // Write the current set of valid transactions into the conf file private void recordValidTxns(HiveTxnManager txnMgr) throws LockException { - ValidTxnList oldList = null; - String s = conf.get(ValidTxnList.VALID_TXNS_KEY); - if(s != null && s.length() > 0) { - oldList = new ValidReadTxnList(s); - } - ValidTxnList txns = txnMgr.getValidTxns(); - if(oldList != null) { + String oldTxnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + if ((oldTxnString != null) && (oldTxnString.length() > 0)) { throw new IllegalStateException("calling recordValidTxn() more than once in the same " + - JavaUtils.txnIdToString(txnMgr.getCurrentTxnId())); + JavaUtils.txnIdToString(txnMgr.getCurrentTxnId())); } - String txnStr = txns.toString(); + ValidTxnList txnList = txnMgr.getValidTxns(); + String txnStr = txnList.toString(); conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr); - if(plan.getFetchTask() != null) { + LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId()); + } + + // Write the current set of valid write ids for the operated acid tables into the conf file so + // that it can be read by the input format. 
+ private void recordValidWriteIds(HiveTxnManager txnMgr) throws LockException { + String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + if ((txnString == null) || (txnString.isEmpty())) { + throw new IllegalStateException("calling recordValidWritsIdss() without initializing ValidTxnList " + + JavaUtils.txnIdToString(txnMgr.getCurrentTxnId())); + } + ValidTxnWriteIdList txnWriteIds = txnMgr.getValidWriteIds(getTransactionalTableList(plan), txnString); + String writeIdStr = txnWriteIds.toString(); + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, writeIdStr); + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIdStr); + if (plan.getFetchTask() != null) { /** * This is needed for {@link HiveConf.ConfVars.HIVEFETCHTASKCONVERSION} optimization which * initializes JobConf in FetchOperator before recordValidTxns() but this has to be done * after locks are acquired to avoid race conditions in ACID. + * This case is supported only for single source query. */ - plan.getFetchTask().setValidTxnList(txnStr); + Operator source = plan.getFetchTask().getWork().getSource(); + if (source instanceof TableScanOperator) { + TableScanOperator tsOp = (TableScanOperator)source; + ValidWriteIdList writeIdList = txnWriteIds.getTableWriteIdList( + AcidUtils.getFullTableName(tsOp.getConf().getDatabaseName(), tsOp.getConf().getTableName())); + plan.getFetchTask().setValidWriteIdList(writeIdList.toString()); + } + } + LOG.debug("Encoding valid write ids info " + writeIdStr + " txnid:" + txnMgr.getCurrentTxnId()); + } + + // Make the list of transactional tables list which are getting read or written by current txn + private List getTransactionalTableList(QueryPlan plan) { + List tableList = new ArrayList<>(); + + for (ReadEntity input : plan.getInputs()) { + addTableFromEntity(input, tableList); + } + return tableList; + } + + private void addTableFromEntity(Entity entity, List tableList) { + Table tbl; + switch (entity.getType()) { + case TABLE: + tbl = entity.getTable(); + 
break; + case PARTITION: + case DUMMYPARTITION: + tbl = entity.getPartition().getTable(); + break; + default: + return; + } + String fullTableName = AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName()); + if (AcidUtils.isTransactionalTable(tbl) && !tableList.contains(fullTableName)) { + tableList.add(fullTableName); } - LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId()); } private String getUserFromUGI() { @@ -1276,7 +1325,7 @@ private void acquireLocks() throws CommandProcessorResponse { if(userFromUGI == null) { throw createProcessorResponse(10); } - // Set the transaction id in all of the acid file sinks + // Set the table write id in all of the acid file sinks if (haveAcidWrite()) { List acidSinks = new ArrayList<>(plan.getAcidSinks()); //sorting makes tests easier to write since file names and ROW__IDs depend on statementId @@ -1284,18 +1333,25 @@ private void acquireLocks() throws CommandProcessorResponse { acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getDirName().compareTo(fsd2.getDirName())); for (FileSinkDesc desc : acidSinks) { - desc.setTransactionId(queryTxnMgr.getCurrentTxnId()); + TableDesc tableInfo = desc.getTableInfo(); + long writeId = queryTxnMgr.getTableWriteId(Utilities.getDatabaseName(tableInfo.getTableName()), + Utilities.getTableName(tableInfo.getTableName())); + desc.setTableWriteId(writeId); + //it's possible to have > 1 FileSink writing to the same table/partition //e.g. 
Merge stmt, multi-insert stmt when mixing DP and SP writes - desc.setStatementId(queryTxnMgr.getWriteIdAndIncrement()); + desc.setStatementId(queryTxnMgr.getStmtIdAndIncrement()); } } /*It's imperative that {@code acquireLocks()} is called for all commands so that HiveTxnManager can transition its state machine correctly*/ queryTxnMgr.acquireLocks(plan, ctx, userFromUGI, lDrvState); - if(queryTxnMgr.recordSnapshot(plan)) { + if (queryTxnMgr.recordSnapshot(plan)) { recordValidTxns(queryTxnMgr); } + if (plan.hasAcidResourcesInQuery()) { + recordValidWriteIds(queryTxnMgr); + } } catch (Exception e) { errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); @@ -1337,6 +1393,7 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa // If we've opened a transaction we need to commit or rollback rather than explicitly // releasing the locks. conf.unset(ValidTxnList.VALID_TXNS_KEY); + conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(!checkConcurrency()) { return; } @@ -1479,8 +1536,6 @@ public void lockAndRespond() throws CommandProcessorResponse { private static final ReentrantLock globalCompileLock = new ReentrantLock(); private void compileInternal(String command, boolean deferClose) throws CommandProcessorResponse { - int ret; - Metrics metrics = MetricsFactory.getInstance(); if (metrics != null) { metrics.incrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1); @@ -1654,7 +1709,6 @@ private void runInternal(String command, boolean alreadyCompiled) throw cpr; } - //if needRequireLock is false, the release here will do nothing because there is no lock try { //since set autocommit starts an implicit txn, close it diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index cf19351..df84417 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -260,7 +260,7 @@ public void closeOp(boolean abort) throws HiveException { // There's always just one file that we have merged. // The union/DP/etc. should already be account for in the path. Utilities.writeMmCommitManifest(Lists.newArrayList(outPath), - tmpPath.getParent(), fs, taskId, conf.getTxnId(), conf.getStmtId(), null, false); + tmpPath.getParent(), fs, taskId, conf.getWriteId(), conf.getStmtId(), null, false); LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes)."); } } @@ -322,7 +322,7 @@ public void jobCloseOp(Configuration hconf, boolean success) try { Path outputDir = conf.getOutputPath(); FileSystem fs = outputDir.getFileSystem(hconf); - Long mmWriteId = conf.getTxnId(); + Long mmWriteId = conf.getWriteId(); int stmtId = conf.getStmtId(); if (!isMmTable) { Path backupPath = backupOutputPath(fs, outputDir); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index d3aa571..a2e2fb3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -51,7 +51,6 @@ import java.util.concurrent.ExecutionException; import com.google.common.collect.ImmutableSet; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -61,8 +60,8 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.type.HiveDecimal; import 
org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; @@ -76,7 +75,6 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -108,7 +106,6 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; @@ -4419,7 +4416,7 @@ private void checkMmLb(Partition part) throws HiveException { } private void handleRemoveMm( - Path path, ValidTxnList validTxnList, List result) throws HiveException { + Path path, ValidWriteIdList validWriteIdList, List result) throws HiveException { // Note: doesn't take LB into account; that is not presently supported here (throws above). try { FileSystem fs = path.getFileSystem(conf); @@ -4429,10 +4426,10 @@ private void handleRemoveMm( ensureDelete(fs, childPath, "a non-directory file"); continue; } - Long writeId = JavaUtils.extractTxnId(childPath); + Long writeId = JavaUtils.extractWriteId(childPath); if (writeId == null) { ensureDelete(fs, childPath, "an unknown directory"); - } else if (!validTxnList.isTxnValid(writeId)) { + } else if (!validWriteIdList.isWriteIdValid(writeId)) { // Assume no concurrent active writes - we rely on locks here. We could check and fail. 
ensureDelete(fs, childPath, "an uncommitted directory"); } else { @@ -4467,9 +4464,10 @@ private static void ensureDelete(FileSystem fs, Path path, String what) throws I try { HiveTxnManager txnManager = SessionState.get().getTxnMgr(); if (txnManager.isTxnOpen()) { - mmWriteId = txnManager.getCurrentTxnId(); + mmWriteId = txnManager.getTableWriteId(tbl.getDbName(), tbl.getTableName()); } else { - mmWriteId = txnManager.openTxn(new Context(conf), conf.getUser()); + txnManager.openTxn(new Context(conf), conf.getUser()); + mmWriteId = txnManager.getTableWriteId(tbl.getDbName(), tbl.getTableName()); txnManager.commitTxn(); } } catch (Exception e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 97e1e36..01654e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -37,8 +37,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -151,8 +151,8 @@ public FetchOperator(FetchWork work, JobConf job, Operator operator, initialize(); } - public void setValidTxnList(String txnStr) { - job.set(ValidTxnList.VALID_TXNS_KEY, txnStr); + public void setValidWriteIdList(String writeIdStr) { + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIdStr); } private void initialize() throws HiveException { if (isStatReader) { @@ -407,17 +407,17 @@ private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOEx if (inputFormat instanceof 
HiveInputFormat) { return StringUtils.escapeString(currPath.toString()); // No need to process here. } - ValidTxnList validTxnList; + ValidWriteIdList validWriteIdList; if (AcidUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) { - validTxnList = extractValidTxnList(); + validWriteIdList = extractValidTxnList(); } else { - validTxnList = null; // non-MM case + validWriteIdList = null; // non-MM case } - if (validTxnList != null) { + if (validWriteIdList != null) { Utilities.FILE_OP_LOGGER.info("Processing " + currDesc.getTableName() + " for MM paths"); } - Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, validTxnList); + Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, validWriteIdList); if (dirs == null || dirs.length == 0) { return null; // No valid inputs. This condition is logged inside the call. } @@ -428,10 +428,10 @@ private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOEx return str.toString(); } - private ValidTxnList extractValidTxnList() { + private ValidWriteIdList extractValidTxnList() { if (currDesc.getTableName() == null || !org.apache.commons.lang.StringUtils.isBlank(currDesc.getTableName())) { - String txnString = job.get(ValidTxnList.VALID_TXNS_KEY); - return txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = job.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + return txnString == null ? 
new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); } return null; // not fetching from a table directly but from a temp location } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java index a7dace9..519bee3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java @@ -57,8 +57,8 @@ public FetchTask() { super(); } - public void setValidTxnList(String txnStr) { - fetch.setValidTxnList(txnStr); + public void setValidWriteIdList(String writeIdStr) { + fetch.setValidWriteIdList(writeIdStr); } @Override public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext ctx, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 98bb938..da31cef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -173,7 +173,7 @@ int acidLastBucket = -1; int acidFileOffset = -1; private boolean isMmTable; - private Long txnId; + private Long writeId; private int stmtId; String dpDir; @@ -185,7 +185,7 @@ public FSPaths(Path specPath, boolean isMmTable) { } else { tmpPath = specPath; taskOutputTempPath = null; // Should not be used. 
- txnId = conf.getTransactionId(); + writeId = conf.getTableWriteId(); stmtId = conf.getStatementId(); } if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { @@ -337,7 +337,7 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT } outPaths[filesIdx] = getTaskOutPath(taskId); } else { - String subdirPath = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(), txnId, txnId, stmtId); + String subdirPath = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(), writeId, writeId, stmtId); if (unionPath != null) { // Create the union directory inside the MM directory. subdirPath += Path.SEPARATOR + unionPath; @@ -961,7 +961,7 @@ public void process(Object row, int tag) throws HiveException { if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) { rowOutWriters[findWriterOffset(row)].write(recordValue); } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) { - fpaths.updaters[findWriterOffset(row)].insert(conf.getTransactionId(), row); + fpaths.updaters[findWriterOffset(row)].insert(conf.getTableWriteId(), row); } else { // TODO I suspect we could skip much of the stuff above this in the function in the case // of update and delete. 
But I don't understand all of the side effects of the above @@ -1018,9 +1018,9 @@ public void process(Object row, int tag) throws HiveException { } } if (conf.getWriteType() == AcidUtils.Operation.UPDATE) { - fpaths.updaters[writerOffset].update(conf.getTransactionId(), row); + fpaths.updaters[writerOffset].update(conf.getTableWriteId(), row); } else if (conf.getWriteType() == AcidUtils.Operation.DELETE) { - fpaths.updaters[writerOffset].delete(conf.getTransactionId(), row); + fpaths.updaters[writerOffset].delete(conf.getTableWriteId(), row); } else { throw new HiveException("Unknown write type " + conf.getWriteType().toString()); } @@ -1322,7 +1322,7 @@ public void closeOp(boolean abort) throws HiveException { } if (conf.isMmTable()) { Utilities.writeMmCommitManifest( - commitPaths, specPath, fs, taskId, conf.getTransactionId(), conf.getStatementId(), unionPath, conf.getInsertOverwrite()); + commitPaths, specPath, fs, taskId, conf.getTableWriteId(), conf.getStatementId(), unionPath, conf.getInsertOverwrite()); } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { @@ -1380,7 +1380,7 @@ public void jobCloseOp(Configuration hconf, boolean success) MissingBucketsContext mbc = new MissingBucketsContext( conf.getTableInfo(), numBuckets, conf.getCompressed()); Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success, - dpLevels, lbLevels, mbc, conf.getTransactionId(), conf.getStatementId(), reporter, + dpLevels, lbLevels, mbc, conf.getTableWriteId(), conf.getStatementId(), reporter, conf.isMmTable(), conf.isMmCtas(), conf.getInsertOverwrite()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 941dd58..cb4d55e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -52,7 +52,6 @@ import java.util.Arrays; import java.util.Calendar; import 
java.util.Collection; -import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; @@ -101,7 +100,7 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; @@ -4394,7 +4393,7 @@ private static void deleteUncommitedFile(Path childPath, FileSystem fs) * if the entire directory is valid (has no uncommitted/temporary files). */ public static List getValidMmDirectoriesFromTableOrPart(Path path, Configuration conf, - ValidTxnList validTxnList, int lbLevels) throws IOException { + ValidWriteIdList validWriteIdList, int lbLevels) throws IOException { Utilities.FILE_OP_LOGGER.trace("Looking for valid MM paths under {}", path); // NULL means this directory is entirely valid. 
List result = null; @@ -4404,8 +4403,8 @@ private static void deleteUncommitedFile(Path childPath, FileSystem fs) for (int i = 0; i < children.length; ++i) { FileStatus file = children[i]; Path childPath = file.getPath(); - Long txnId = JavaUtils.extractTxnId(childPath); - if (!file.isDirectory() || txnId == null || !validTxnList.isTxnValid(txnId)) { + Long writeId = JavaUtils.extractWriteId(childPath); + if (!file.isDirectory() || writeId == null || !validWriteIdList.isWriteIdValid(writeId)) { Utilities.FILE_OP_LOGGER.debug("Skipping path {}", childPath); if (result == null) { result = new ArrayList<>(children.length - 1); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index abd42ec..65fb2a0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.io.CachingPrintStream; import org.apache.hadoop.hive.common.log.LogRedirector; import org.apache.hadoop.hive.common.metrics.common.Metrics; @@ -487,6 +488,7 @@ private void initializeOperators(Map fetchOpJobConfMap) AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties()); + AcidUtils.setValidWriteIdList(jobClone, ts.getConf()); // create a fetch operator FetchOperator fetchOp = new FetchOperator(entry.getValue(), jobClone); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java index 65eb434..1ed35b3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; @@ -109,8 +109,8 @@ extends InputFormat, InputFormatChecker { static final class DeltaMetaData implements Writable { - private long minTxnId; - private long maxTxnId; + private long minWriteId; + private long maxWriteId; private List stmtIds; //would be useful to have enum for Type: insert/delete/load data @@ -120,27 +120,27 @@ public DeltaMetaData() { /** * @param stmtIds delta dir suffixes when a single txn writes > 1 delta in the same partition */ - DeltaMetaData(long minTxnId, long maxTxnId, List stmtIds) { - this.minTxnId = minTxnId; - this.maxTxnId = maxTxnId; + DeltaMetaData(long minWriteId, long maxWriteId, List stmtIds) { + this.minWriteId = minWriteId; + this.maxWriteId = maxWriteId; if (stmtIds == null) { throw new IllegalArgumentException("stmtIds == null"); } this.stmtIds = stmtIds; } - long getMinTxnId() { - return minTxnId; + long getMinWriteId() { + return minWriteId; } - long getMaxTxnId() { - return maxTxnId; + long getMaxWriteId() { + return maxWriteId; } List getStmtIds() { return stmtIds; } @Override public void write(DataOutput out) throws IOException { - out.writeLong(minTxnId); - out.writeLong(maxTxnId); + out.writeLong(minWriteId); + out.writeLong(maxWriteId); out.writeInt(stmtIds.size()); for(Integer id : stmtIds) { out.writeInt(id); @@ -148,8 +148,8 @@ public void write(DataOutput out) throws IOException { } @Override public void readFields(DataInput in) throws IOException { - minTxnId = in.readLong(); - maxTxnId = in.readLong(); + minWriteId = in.readLong(); + maxWriteId = in.readLong(); stmtIds.clear(); int 
numStatements = in.readInt(); for(int i = 0; i < numStatements; i++) { @@ -159,7 +159,7 @@ public void readFields(DataInput in) throws IOException { @Override public String toString() { //? is Type - when implemented - return "Delta(?," + minTxnId + "," + maxTxnId + "," + stmtIds + ")"; + return "Delta(?," + minWriteId + "," + maxWriteId + "," + stmtIds + ")"; } } /** @@ -227,7 +227,7 @@ public Reporter getReporter() { * @param collapseEvents should the ACID events be collapsed so that only * the last version of the row is kept. * @param bucket the bucket to read - * @param validTxnList the list of valid transactions to use + * @param validWriteIdList the list of valid write ids to use * @param baseDirectory the base directory to read or the root directory for * old style files * @param deltaDirectory a list of delta files to include in the merge @@ -237,7 +237,7 @@ public Reporter getReporter() { RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory ) throws IOException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java index 26d4dc6..05beafe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java @@ -49,8 +49,8 @@ private boolean isCompressed = false; private Properties properties; private Reporter reporter; - private long minimumTransactionId; - private long maximumTransactionId; + private long minimumWriteId; + private long maximumWriteId; private int bucketId; /** * Based on {@link org.apache.hadoop.hive.ql.metadata.Hive#mvFile(HiveConf, FileSystem, Path, FileSystem, Path, boolean, boolean)} @@ -156,22 +156,22 @@ public Options reporter(Reporter reporter) { } /** - * The minimum transaction id that is included in this file. 
- * @param min minimum transaction id + * The minimum write id that is included in this file. + * @param min minimum write id * @return this */ - public Options minimumTransactionId(long min) { - this.minimumTransactionId = min; + public Options minimumWriteId(long min) { + this.minimumWriteId = min; return this; } /** - * The maximum transaction id that is included in this file. - * @param max maximum transaction id + * The maximum write id that is included in this file. + * @param max maximum write id * @return this */ - public Options maximumTransactionId(long max) { - this.maximumTransactionId = max; + public Options maximumWriteId(long max) { + this.maximumWriteId = max; return this; } @@ -236,7 +236,7 @@ public Options useDummy(PrintStream stream) { */ public Options statementId(int id) { if(id >= AcidUtils.MAX_STATEMENTS_PER_TXN) { - throw new RuntimeException("Too many statements for transactionId: " + maximumTransactionId); + throw new RuntimeException("Too many statements for writeId: " + maximumWriteId); } if(id < -1) { throw new IllegalArgumentException("Illegal statementId value: " + id); @@ -277,12 +277,12 @@ public Reporter getReporter() { return reporter; } - public long getMinimumTransactionId() { - return minimumTransactionId; + public long getMinimumWriteId() { + return minimumWriteId; } - public long getMaximumTransactionId() { - return maximumTransactionId; + public long getMaximumWriteId() { + return maximumWriteId; } public boolean isWritingBase() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 430e0fc..64dfa4a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -36,7 +36,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.common.ValidTxnList; +import 
org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.DataOperationType; @@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; import org.apache.hadoop.hive.shims.ShimLoader; @@ -224,8 +226,8 @@ public static String deleteDeltaSubdir(long min, long max, int statementId) { return deleteDeltaSubdir(min, max) + "_" + String.format(STATEMENT_DIGITS, statementId); } - public static String baseDir(long txnId) { - return BASE_PREFIX + String.format(DELTA_DIGITS, txnId); + public static String baseDir(long writeId) { + return BASE_PREFIX + String.format(DELTA_DIGITS, writeId); } /** @@ -254,31 +256,31 @@ public static Path createFilename(Path directory, options.getBucketId()) + "_0"); } else if (options.isWritingBase()) { subdir = BASE_PREFIX + String.format(DELTA_DIGITS, - options.getMaximumTransactionId()); + options.getMaximumWriteId()); } else if(options.getStatementId() == -1) { //when minor compaction runs, we collapse per statement delta files inside a single //transaction so we no longer need a statementId in the file name subdir = options.isWritingDeleteDelta() ? - deleteDeltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId()) - : deltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId()); + deleteDeltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId()) + : deltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId()); } else { subdir = options.isWritingDeleteDelta() ? 
- deleteDeltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId(), + deleteDeltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId(), options.getStatementId()) - : deltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId(), + : deltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId(), options.getStatementId()); } return createBucketFile(new Path(directory, subdir), options.getBucketId()); } /** - * Get the transaction id from a base directory name. + * Get the write id from a base directory name. * @param path the base directory name - * @return the maximum transaction id that is included + * @return the maximum write id that is included */ public static long parseBase(Path path) { String filename = path.getName(); @@ -306,8 +308,8 @@ public static long parseBase(Path path) { Integer.parseInt(filename.substring(0, filename.indexOf('_'))); result .setOldStyle(true) - .minimumTransactionId(0) - .maximumTransactionId(0) + .minimumWriteId(0) + .maximumWriteId(0) .bucket(bucket) .writingBase(!bucketFile.getParent().getName().startsWith(DELTA_PREFIX)); } @@ -318,8 +320,8 @@ else if(ORIGINAL_PATTERN_COPY.matcher(filename).matches()) { int copyNumber = Integer.parseInt(filename.substring(filename.lastIndexOf('_') + 1)); result .setOldStyle(true) - .minimumTransactionId(0) - .maximumTransactionId(0) + .minimumWriteId(0) + .maximumWriteId(0) .bucket(bucket) .copyNumber(copyNumber) .writingBase(!bucketFile.getParent().getName().startsWith(DELTA_PREFIX)); @@ -330,8 +332,8 @@ else if (filename.startsWith(BUCKET_PREFIX)) { if (bucketFile.getParent().getName().startsWith(BASE_PREFIX)) { result .setOldStyle(false) - .minimumTransactionId(0) - .maximumTransactionId(parseBase(bucketFile.getParent())) + .minimumWriteId(0) + .maximumWriteId(parseBase(bucketFile.getParent())) .bucket(bucket) .writingBase(true); } else if (bucketFile.getParent().getName().startsWith(DELTA_PREFIX)) { @@ -339,21 +341,21 @@ else if 
(filename.startsWith(BUCKET_PREFIX)) { bucketFile.getFileSystem(conf)); result .setOldStyle(false) - .minimumTransactionId(parsedDelta.minTransaction) - .maximumTransactionId(parsedDelta.maxTransaction) + .minimumWriteId(parsedDelta.minWriteId) + .maximumWriteId(parsedDelta.maxWriteId) .bucket(bucket); } else if (bucketFile.getParent().getName().startsWith(DELETE_DELTA_PREFIX)) { ParsedDelta parsedDelta = parsedDelta(bucketFile.getParent(), DELETE_DELTA_PREFIX, bucketFile.getFileSystem(conf)); result .setOldStyle(false) - .minimumTransactionId(parsedDelta.minTransaction) - .maximumTransactionId(parsedDelta.maxTransaction) + .minimumWriteId(parsedDelta.minWriteId) + .maximumWriteId(parsedDelta.maxWriteId) .bucket(bucket); } } else { - result.setOldStyle(true).bucket(-1).minimumTransactionId(0) - .maximumTransactionId(0); + result.setOldStyle(true).bucket(-1).minimumWriteId(0) + .maximumWriteId(0); } return result; } @@ -637,8 +639,8 @@ public String toString() { * Immutable */ public static final class ParsedDelta implements Comparable { - private final long minTransaction; - private final long maxTransaction; + private final long minWriteId; + private final long maxWriteId; private final FileStatus path; //-1 is for internal (getAcidState()) purposes and means the delta dir //had no statement ID @@ -655,8 +657,8 @@ private ParsedDelta(long min, long max, FileStatus path, boolean isDeleteDelta, } private ParsedDelta(long min, long max, FileStatus path, int statementId, boolean isDeleteDelta, boolean isRawFormat) { - this.minTransaction = min; - this.maxTransaction = max; + this.minWriteId = min; + this.maxWriteId = max; this.path = path; this.statementId = statementId; this.isDeleteDelta = isDeleteDelta; @@ -664,12 +666,12 @@ private ParsedDelta(long min, long max, FileStatus path, int statementId, assert !isDeleteDelta || !isRawFormat : " deleteDelta should not be raw format"; } - public long getMinTransaction() { - return minTransaction; + public long 
getMinWriteId() { + return minWriteId; } - public long getMaxTransaction() { - return maxTransaction; + public long getMaxWriteId() { + return maxWriteId; } public Path getPath() { @@ -698,14 +700,14 @@ public boolean isRawFormat() { */ @Override public int compareTo(ParsedDelta parsedDelta) { - if (minTransaction != parsedDelta.minTransaction) { - if (minTransaction < parsedDelta.minTransaction) { + if (minWriteId != parsedDelta.minWriteId) { + if (minWriteId < parsedDelta.minWriteId) { return -1; } else { return 1; } - } else if (maxTransaction != parsedDelta.maxTransaction) { - if (maxTransaction < parsedDelta.maxTransaction) { + } else if (maxWriteId != parsedDelta.maxWriteId) { + if (maxWriteId < parsedDelta.maxWriteId) { return 1; } else { return -1; @@ -754,11 +756,11 @@ else if(statementId != parsedDelta.statementId) { List result = new ArrayList<>(deltas.size()); AcidInputFormat.DeltaMetaData last = null; for(ParsedDelta parsedDelta : deltas) { - if(last != null && last.getMinTxnId() == parsedDelta.getMinTransaction() && last.getMaxTxnId() == parsedDelta.getMaxTransaction()) { + if(last != null && last.getMinWriteId() == parsedDelta.getMinWriteId() && last.getMaxWriteId() == parsedDelta.getMaxWriteId()) { last.getStmtIds().add(parsedDelta.getStatementId()); continue; } - last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinTransaction(), parsedDelta.getMaxTransaction(), new ArrayList()); + last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinWriteId(), parsedDelta.getMaxWriteId(), new ArrayList()); result.add(last); if(parsedDelta.statementId >= 0) { last.getStmtIds().add(parsedDelta.getStatementId()); @@ -780,11 +782,11 @@ else if(statementId != parsedDelta.statementId) { List results = new ArrayList(deleteDeltas.size()); for(AcidInputFormat.DeltaMetaData dmd : deleteDeltas) { if(dmd.getStmtIds().isEmpty()) { - results.add(new Path(root, deleteDeltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId()))); + results.add(new Path(root, 
deleteDeltaSubdir(dmd.getMinWriteId(), dmd.getMaxWriteId()))); continue; } for(Integer stmtId : dmd.getStmtIds()) { - results.add(new Path(root, deleteDeltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId(), stmtId))); + results.add(new Path(root, deleteDeltaSubdir(dmd.getMinWriteId(), dmd.getMaxWriteId(), stmtId))); } } return results.toArray(new Path[results.size()]); @@ -802,8 +804,8 @@ private static ParsedDelta parseDelta(FileStatus path, String deltaPrefix, FileS throws IOException { ParsedDelta p = parsedDelta(path.getPath(), deltaPrefix, fs); boolean isDeleteDelta = deltaPrefix.equals(DELETE_DELTA_PREFIX); - return new ParsedDelta(p.getMinTransaction(), - p.getMaxTransaction(), path, p.statementId, isDeleteDelta, p.isRawFormat()); + return new ParsedDelta(p.getMinWriteId(), + p.getMaxWriteId(), path, p.statementId, isDeleteDelta, p.isRawFormat()); } public static ParsedDelta parsedDelta(Path deltaDir, String deltaPrefix, FileSystem fs) @@ -856,16 +858,16 @@ public static boolean isAcid(Path directory, @VisibleForTesting public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList txnList + ValidWriteIdList writeIdList ) throws IOException { - return getAcidState(directory, conf, txnList, false, false); + return getAcidState(directory, conf, writeIdList, false, false); } /** State class for getChildState; cannot modify 2 things in a method. */ private static class TxnBase { private FileStatus status; - private long txn = 0; - private long oldestBaseTxnId = Long.MAX_VALUE; + private long writeId = 0; + private long oldestBaseWriteId = Long.MAX_VALUE; private Path oldestBase = null; } @@ -876,22 +878,22 @@ public static Directory getAcidState(Path directory, * transaction id that we must exclude. 
* @param directory the partition directory to analyze * @param conf the configuration - * @param txnList the list of transactions that we are reading + * @param writeIdList the list of write ids that we are reading * @return the state of the directory * @throws IOException */ public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList txnList, + ValidWriteIdList writeIdList, boolean useFileIds, boolean ignoreEmptyFiles ) throws IOException { - return getAcidState(directory, conf, txnList, Ref.from(useFileIds), ignoreEmptyFiles, null); + return getAcidState(directory, conf, writeIdList, Ref.from(useFileIds), ignoreEmptyFiles, null); } public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList txnList, + ValidWriteIdList writeIdList, Ref useFileIds, boolean ignoreEmptyFiles, Map tblproperties) throws IOException { @@ -921,13 +923,13 @@ public static Directory getAcidState(Path directory, final List original = new ArrayList<>(); if (childrenWithId != null) { for (HdfsFileStatusWithId child : childrenWithId) { - getChildState(child.getFileStatus(), child, txnList, working, originalDirectories, original, + getChildState(child.getFileStatus(), child, writeIdList, working, originalDirectories, original, obsolete, bestBase, ignoreEmptyFiles, abortedDirectories, tblproperties, fs); } } else { List children = HdfsUtils.listLocatedStatus(fs, directory, hiddenFileFilter); for (FileStatus child : children) { - getChildState(child, null, txnList, working, originalDirectories, original, obsolete, + getChildState(child, null, writeIdList, working, originalDirectories, original, obsolete, bestBase, ignoreEmptyFiles, abortedDirectories, tblproperties, fs); } } @@ -955,30 +957,30 @@ public static Directory getAcidState(Path directory, Collections.sort(working); //so now, 'working' should be sorted like delta_5_20 delta_5_10 delta_11_20 delta_51_60 for example //and we want to end up with the best set containing all relevant 
data: delta_5_20 delta_51_60, - //subject to list of 'exceptions' in 'txnList' (not show in above example). - long current = bestBase.txn; + //subject to list of 'exceptions' in 'writeIdList' (not show in above example). + long current = bestBase.writeId; int lastStmtId = -1; ParsedDelta prev = null; for(ParsedDelta next: working) { - if (next.maxTransaction > current) { + if (next.maxWriteId > current) { // are any of the new transactions ones that we care about? - if (txnList.isTxnRangeValid(current+1, next.maxTransaction) != - ValidTxnList.RangeResponse.NONE) { + if (writeIdList.isWriteIdRangeValid(current+1, next.maxWriteId) != + ValidWriteIdList.RangeResponse.NONE) { deltas.add(next); - current = next.maxTransaction; + current = next.maxWriteId; lastStmtId = next.statementId; prev = next; } } - else if(next.maxTransaction == current && lastStmtId >= 0) { + else if(next.maxWriteId == current && lastStmtId >= 0) { //make sure to get all deltas within a single transaction; multi-statement txn //generate multiple delta files with the same txnId range - //of course, if maxTransaction has already been minor compacted, all per statement deltas are obsolete + //of course, if maxWriteId has already been minor compacted, all per statement deltas are obsolete deltas.add(next); prev = next; } - else if (prev != null && next.maxTransaction == prev.maxTransaction - && next.minTransaction == prev.minTransaction + else if (prev != null && next.maxWriteId == prev.maxWriteId + && next.minWriteId == prev.minWriteId && next.statementId == prev.statementId) { // The 'next' parsedDelta may have everything equal to the 'prev' parsedDelta, except // the path. 
This may happen when we have split update and we have two types of delta @@ -1002,15 +1004,15 @@ else if (prev != null && next.maxTransaction == prev.maxTransaction if(bestBase.oldestBase != null && bestBase.status == null) { /** * If here, it means there was a base_x (> 1 perhaps) but none were suitable for given - * {@link txnList}. Note that 'original' files are logically a base_Long.MIN_VALUE and thus + * {@link writeIdList}. Note that 'original' files are logically a base_Long.MIN_VALUE and thus * cannot have any data for an open txn. We could check {@link deltas} has files to cover * [1,n] w/o gaps but this would almost never happen...*/ - long[] exceptions = txnList.getInvalidTransactions(); - String minOpenTxn = exceptions != null && exceptions.length > 0 ? + long[] exceptions = writeIdList.getInvalidWriteIds(); + String minOpenWriteId = exceptions != null && exceptions.length > 0 ? Long.toString(exceptions[0]) : "x"; throw new IOException(ErrorMsg.ACID_NOT_ENOUGH_HISTORY.format( - Long.toString(txnList.getHighWatermark()), - minOpenTxn, bestBase.oldestBase.toString())); + Long.toString(writeIdList.getHighWatermark()), + minOpenWriteId, bestBase.oldestBase.toString())); } final Path base = bestBase.status == null ? null : bestBase.status.getPath(); @@ -1071,43 +1073,44 @@ public boolean isBaseInRawFormat() { * causes anything written previously is ignored (hence the overwrite). In this case, base_x * is visible if txnid:x is committed for current reader. */ - private static boolean isValidBase(long baseTxnId, ValidTxnList txnList, Path baseDir, - FileSystem fs) throws IOException { - if(baseTxnId == Long.MIN_VALUE) { + private static boolean isValidBase(long baseWriteId, ValidWriteIdList writeIdList, Path baseDir, + FileSystem fs) throws IOException { + if(baseWriteId == Long.MIN_VALUE) { //such base is created by 1st compaction in case of non-acid to acid table conversion //By definition there are no open txns with id < 1. 
return true; } if(!MetaDataFile.isCompacted(baseDir, fs)) { //this is the IOW case - return txnList.isTxnValid(baseTxnId); + return writeIdList.isWriteIdValid(baseWriteId); } - return txnList.isValidBase(baseTxnId); + return writeIdList.isValidBase(baseWriteId); } + private static void getChildState(FileStatus child, HdfsFileStatusWithId childWithId, - ValidTxnList txnList, List working, List originalDirectories, + ValidWriteIdList writeIdList, List working, List originalDirectories, List original, List obsolete, TxnBase bestBase, boolean ignoreEmptyFiles, List aborted, Map tblproperties, FileSystem fs) throws IOException { Path p = child.getPath(); String fn = p.getName(); if (fn.startsWith(BASE_PREFIX) && child.isDir()) { - long txn = parseBase(p); - if(bestBase.oldestBaseTxnId > txn) { + long writeId = parseBase(p); + if(bestBase.oldestBaseWriteId > writeId) { //keep track for error reporting bestBase.oldestBase = p; - bestBase.oldestBaseTxnId = txn; + bestBase.oldestBaseWriteId = writeId; } if (bestBase.status == null) { - if(isValidBase(txn, txnList, p, fs)) { + if(isValidBase(writeId, writeIdList, p, fs)) { bestBase.status = child; - bestBase.txn = txn; + bestBase.writeId = writeId; } - } else if (bestBase.txn < txn) { - if(isValidBase(txn, txnList, p, fs)) { + } else if (bestBase.writeId < writeId) { + if(isValidBase(writeId, writeIdList, p, fs)) { obsolete.add(bestBase.status); bestBase.status = child; - bestBase.txn = txn; + bestBase.writeId = writeId; } } else { obsolete.add(child); @@ -1118,12 +1121,12 @@ private static void getChildState(FileStatus child, HdfsFileStatusWithId childWi (fn.startsWith(DELTA_PREFIX)) ? 
DELTA_PREFIX : DELETE_DELTA_PREFIX; ParsedDelta delta = parseDelta(child, deltaPrefix, fs); if (tblproperties != null && AcidUtils.isInsertOnlyTable(tblproperties) && - ValidTxnList.RangeResponse.ALL == txnList.isTxnRangeAborted(delta.minTransaction, delta.maxTransaction)) { + ValidWriteIdList.RangeResponse.ALL == writeIdList.isWriteIdRangeAborted(delta.minWriteId, delta.maxWriteId)) { aborted.add(child); } - if (txnList.isTxnRangeValid(delta.minTransaction, - delta.maxTransaction) != - ValidTxnList.RangeResponse.NONE) { + if (writeIdList.isWriteIdRangeValid(delta.minWriteId, + delta.maxWriteId) != + ValidWriteIdList.RangeResponse.NONE) { working.add(delta); } } else if (child.isDir()) { @@ -1393,7 +1396,7 @@ public static AcidOperationalProperties getAcidOperationalProperties( * Returns the logical end of file for an acid data file. * * This relies on the fact that if delta_x_y has no committed transactions it wil be filtered out - * by {@link #getAcidState(Path, Configuration, ValidTxnList)} and so won't be read at all. + * by {@link #getAcidState(Path, Configuration, ValidWriteIdList)} and so won't be read at all. 
* @param file - data file to read/compute splits on */ public static long getLogicalLength(FileSystem fs, FileStatus file) throws IOException { @@ -1492,6 +1495,39 @@ public static boolean isRemovedInsertOnlyTable(Set removedSet) { } /** + * Extract the ValidWriteIdList for the given table from the list of tables' ValidWriteIdList + */ + public static ValidWriteIdList getTableValidWriteIdList(Configuration conf, String fullTableName) { + String txnString = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + ValidTxnWriteIdList validTxnList = new ValidTxnWriteIdList(txnString); + return validTxnList.getTableWriteIdList(fullTableName); + } + + /** + * Set the valid write id list for the current table scan + */ + public static void setValidWriteIdList(Configuration conf, ValidWriteIdList validWriteIds) { + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, validWriteIds.toString()); + } + + /** + * Set the valid write id list for the current table scan + */ + public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) { + String dbName = tsDesc.getDatabaseName(); + String tableName = tsDesc.getTableName(); + if ((dbName != null) && (tableName != null)) { + ValidWriteIdList validWriteIdList = AcidUtils.getTableValidWriteIdList(conf, + AcidUtils.getFullTableName(dbName, tableName)); + setValidWriteIdList(conf, validWriteIdList); + } + } + + public static String getFullTableName(String dbName, String tableName) { + return dbName.toLowerCase() + "." + tableName.toLowerCase(); + } + + /** * General facility to place a metadta file into a dir created by acid/compactor write. 
* * Load Data commands against Acid tables write {@link AcidBaseFileType#ORIGINAL_BASE} type files diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java index f0d4988..71498a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java @@ -341,8 +341,8 @@ private static RecordUpdater getRecordUpdater(JobConf jc, .tableProperties(tableProp) .reporter(reporter) .writingBase(conf.getInsertOverwrite()) - .minimumTransactionId(conf.getTransactionId()) - .maximumTransactionId(conf.getTransactionId()) + .minimumWriteId(conf.getTableWriteId()) + .maximumWriteId(conf.getTableWriteId()) .bucket(bucket) .inspector(inspector) .recordIdColumn(rowIdColNum) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index c3b846c..e4af3d5 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -35,8 +35,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hive.common.util.Ref; import org.slf4j.Logger; @@ -457,18 +456,19 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job InputFormat inputFormat, Class inputFormatClass, int splits, TableDesc table, List result) throws IOException { - ValidTxnList validTxnList; + ValidWriteIdList validWriteIdList = AcidUtils.getTableValidWriteIdList(conf, table.getTableName()); + ValidWriteIdList validMmWriteIdList; if 
(AcidUtils.isInsertOnlyTable(table.getProperties())) { - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - validTxnList = txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + validMmWriteIdList = validWriteIdList; } else { - validTxnList = null; // for non-MM case + validMmWriteIdList = null; // for non-MM case } try { Utilities.copyTablePropertiesToConf(table, conf); if(tableScan != null) { AcidUtils.setAcidTableScan(conf, tableScan.getConf().isAcidTable()); + AcidUtils.setValidWriteIdList(conf, validWriteIdList); } } catch (HiveException e) { throw new IOException(e); @@ -478,7 +478,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job pushFilters(conf, tableScan, this.mrwork); } - Path[] finalDirs = processPathsForMmRead(dirs, conf, validTxnList); + Path[] finalDirs = processPathsForMmRead(dirs, conf, validMmWriteIdList); if (finalDirs == null) { return; // No valid inputs. } @@ -503,13 +503,13 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } public static Path[] processPathsForMmRead(List dirs, JobConf conf, - ValidTxnList validTxnList) throws IOException { - if (validTxnList == null) { + ValidWriteIdList validWriteIdList) throws IOException { + if (validWriteIdList == null) { return dirs.toArray(new Path[dirs.size()]); } else { List finalPaths = new ArrayList<>(dirs.size()); for (Path dir : dirs) { - processForWriteIds(dir, conf, validTxnList, finalPaths); + processForWriteIds(dir, conf, validWriteIdList, finalPaths); } if (finalPaths.isEmpty()) { LOG.warn("No valid inputs found in " + dirs); @@ -520,7 +520,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } private static void processForWriteIds(Path dir, JobConf conf, - ValidTxnList validTxnList, List finalPaths) throws IOException { + ValidWriteIdList validWriteIdList, List finalPaths) throws IOException { FileSystem fs = dir.getFileSystem(conf); if 
(Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("Checking " + dir + " (root) for inputs"); @@ -546,10 +546,10 @@ private static void processForWriteIds(Path dir, JobConf conf, } if (!file.isDirectory()) { Utilities.FILE_OP_LOGGER.warn("Ignoring a file not in MM directory " + path); - } else if (JavaUtils.extractTxnId(path) == null) { + } else if (JavaUtils.extractWriteId(path) == null) { subdirs.add(path); } else if (!hadAcidState) { - AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validTxnList, Ref.from(false), true, null); + AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validWriteIdList, Ref.from(false), true, null); hadAcidState = true; // Find the base, created for IOW. @@ -861,6 +861,8 @@ protected void pushProjectionsAndFilters(JobConf jobConf, Class inputFormatClass AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties()); + AcidUtils.setValidWriteIdList(job, ts.getConf()); + } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java index 0c37203..1f673da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java @@ -88,7 +88,7 @@ public static void toArray(RecordIdentifier ri, Object[] struct) { Arrays.fill(struct, null); return; } - struct[Field.transactionId.ordinal()] = ri.getTransactionId(); + struct[Field.transactionId.ordinal()] = ri.getWriteId(); struct[Field.bucketId.ordinal()] = ri.getBucketProperty(); struct[Field.rowId.ordinal()] = ri.getRowId(); } @@ -101,20 +101,20 @@ public static void toArray(RecordIdentifier ri, Object[] struct) { public RecordIdentifier() { } - public RecordIdentifier(long transactionId, int bucket, long rowId) { - this.transactionId = transactionId; + public RecordIdentifier(long writeId, 
int bucket, long rowId) { + this.transactionId = writeId; this.bucketId = bucket; this.rowId = rowId; } /** * Set the identifier. - * @param transactionId the transaction id + * @param writeId the write id * @param bucketId the bucket id * @param rowId the row id */ - public void setValues(long transactionId, int bucketId, long rowId) { - this.transactionId = transactionId; + public void setValues(long writeId, int bucketId, long rowId) { + this.transactionId = writeId; this.bucketId = bucketId; this.rowId = rowId; } @@ -134,10 +134,10 @@ public void setRowId(long rowId) { } /** - * What was the original transaction id for the last row? - * @return the transaction id + * What was the original write id for the last row? + * @return the write id */ - public long getTransactionId() { + public long getWriteId() { return transactionId; } @@ -223,7 +223,7 @@ public String toString() { BucketCodec.determineVersion(bucketId); String s = "(" + codec.getVersion() + "." + codec.decodeWriterId(bucketId) + "." + codec.decodeStatementId(bucketId) + ")"; - return "{originalTxn: " + transactionId + ", " + bucketToString() + ", row: " + getRowId() +"}"; + return "{originalWriteId: " + transactionId + ", " + bucketToString() + ", row: " + getRowId() +"}"; } protected String bucketToString() { BucketCodec codec = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java index 36111f0..0aed172 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java @@ -30,27 +30,26 @@ /** * Insert a new record into the table. - * @param currentTransaction the transaction id of the current transaction. + * @param currentWriteId the table write id of the current transaction. 
* @param row the row of data to insert * @throws IOException */ - void insert(long currentTransaction, - Object row) throws IOException; + void insert(long currentWriteId, Object row) throws IOException; /** * Update an old record with a new set of values. - * @param currentTransaction the current transaction id + * @param currentWriteId the current write id * @param row the new values for the row * @throws IOException */ - void update(long currentTransaction, Object row) throws IOException; + void update(long currentWriteId, Object row) throws IOException; /** * Delete a row from the table. - * @param currentTransaction the current transaction id + * @param currentWriteId the current write id * @throws IOException */ - void delete(long currentTransaction, Object row) throws IOException; + void delete(long currentWriteId, Object row) throws IOException; /** * Flush the current set of rows to the underlying file system, so that diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index ff2cc04..eb30626 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -49,8 +49,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Metastore; @@ -570,7 +570,7 @@ public boolean validateInput(FileSystem fs, HiveConf conf, private final boolean forceThreadpool; private final AtomicInteger cacheHitCounter = new AtomicInteger(0); private final AtomicInteger numFilesCounter = 
new AtomicInteger(0); - private final ValidTxnList transactionList; + private final ValidWriteIdList writeIdList; private SplitStrategyKind splitStrategyKind; private final SearchArgument sarg; private final AcidOperationalProperties acidOperationalProperties; @@ -652,8 +652,8 @@ public boolean validateInput(FileSystem fs, HiveConf conf, footerCache = useExternalCache ? metaCache : localCache; } } - String value = conf.get(ValidTxnList.VALID_TXNS_KEY); - transactionList = value == null ? new ValidReadTxnList() : new ValidReadTxnList(value); + String value = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + writeIdList = value == null ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(value); // Determine the transactional_properties of the table from the job conf stored in context. // The table properties are copied to job conf at HiveInputFormat::addSplitsForGroup(), @@ -935,10 +935,6 @@ public Void run() throws Exception { } } - - - - private void runGetSplitsSync(List>> splitFutures, List splits, UserGroupInformation ugi) throws IOException { UserGroupInformation tpUgi = ugi == null ? UserGroupInformation.getCurrentUser() : ugi; @@ -1095,8 +1091,8 @@ public String toString() { /** * For plain or acid tables this is the root of the partition (or table if not partitioned). * For MM table this is delta/ or base/ dir. In MM case applying of the ValidTxnList that - * {@link AcidUtils#getAcidState(Path, Configuration, ValidTxnList)} normally does has already - * been done in {@link HiveInputFormat#processPathsForMmRead(List, JobConf, ValidTxnList)}. + * {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} normally does has already + * been done in {@link HiveInputFormat#processPathsForMmRead(List, JobConf, ValidWriteIdList)}. 
*/ private final Path dir; private final Ref useFileIds; @@ -1136,7 +1132,7 @@ public AcidDirInfo run() throws Exception { private AcidDirInfo callInternal() throws IOException { //todo: shouldn't ignoreEmptyFiles be set based on ExecutionEngine? AcidUtils.Directory dirInfo = AcidUtils.getAcidState(dir, context.conf, - context.transactionList, useFileIds, true, null); + context.writeIdList, useFileIds, true, null); // find the base files (original or new style) List baseFiles = new ArrayList<>(); if (dirInfo.getBaseDirectory() == null) { @@ -1175,8 +1171,8 @@ private AcidDirInfo callInternal() throws IOException { AcidUtils.AcidBaseFileType.ORIGINAL_BASE : AcidUtils.AcidBaseFileType.ACID_SCHEMA; PathFilter bucketFilter = parsedDelta.isRawFormat() ? AcidUtils.originalBucketFilter : AcidUtils.bucketFileFilter; - if(parsedDelta.isRawFormat() && parsedDelta.getMinTransaction() != - parsedDelta.getMaxTransaction()) { + if(parsedDelta.isRawFormat() && parsedDelta.getMinWriteId() != + parsedDelta.getMaxWriteId()) { //delta/ with files in raw format are a result of Load Data (as opposed to compaction //or streaming ingest so must have interval length == 1. throw new IllegalStateException("Delta in " + AcidUtils.AcidBaseFileType.ORIGINAL_BASE @@ -2012,12 +2008,12 @@ public float getProgress() throws IOException { final Reader.Options readOptions = OrcInputFormat.createOptionsForReader(conf); readOptions.range(split.getStart(), split.getLength()); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - ValidTxnList validTxnList = txnString == null ? new ValidReadTxnList() : - new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + ValidWriteIdList validWriteIdList = txnString == null ? 
new ValidReaderWriteIdList() : + new ValidReaderWriteIdList(txnString); final OrcRawRecordMerger records = new OrcRawRecordMerger(conf, true, reader, split.isOriginal(), bucket, - validTxnList, readOptions, deltas, mergerOptions); + validWriteIdList, readOptions, deltas, mergerOptions); return new RowReader() { OrcStruct innerRecord = records.createValue(); @@ -2299,7 +2295,7 @@ private static boolean isStripeSatisfyPredicate( public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory ) throws IOException { @@ -2323,7 +2319,7 @@ private static boolean isStripeSatisfyPredicate( mergerOptions.rootPath(deltaDirectory[0].getParent()); } return new OrcRawRecordMerger(conf, collapseEvents, null, isOriginal, - bucket, validTxnList, new Reader.Options(), deltaDirectory, mergerOptions); + bucket, validWriteIdList, new Reader.Options(), deltaDirectory, mergerOptions); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java index f1f638d..57e005d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java @@ -204,20 +204,20 @@ private DummyOrcRecordUpdater(Path path, Options options) { } @Override - public void insert(long currentTransaction, Object row) throws IOException { - out.println("insert " + path + " currTxn: " + currentTransaction + + public void insert(long currentWriteId, Object row) throws IOException { + out.println("insert " + path + " currWriteId: " + currentWriteId + " obj: " + stringifyObject(row, inspector)); } @Override - public void update(long currentTransaction, Object row) throws IOException { - out.println("update " + path + " currTxn: " + currentTransaction + + public void update(long currentWriteId, Object row) throws IOException { 
+ out.println("update " + path + " currWriteId: " + currentWriteId + " obj: " + stringifyObject(row, inspector)); } @Override - public void delete(long currentTransaction, Object row) throws IOException { - out.println("delete " + path + " currTxn: " + currentTransaction + " obj: " + row); + public void delete(long currentWriteId, Object row) throws IOException { + out.println("delete " + path + " currWriteId: " + currentWriteId + " obj: " + row); } @Override @@ -307,7 +307,7 @@ public void write(Writable w) throws IOException { watcher.addKey( ((IntWritable) orc.getFieldValue(OrcRecordUpdater.OPERATION)).get(), ((LongWritable) - orc.getFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION)).get(), + orc.getFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID)).get(), ((IntWritable) orc.getFieldValue(OrcRecordUpdater.BUCKET)).get(), ((LongWritable) orc.getFieldValue(OrcRecordUpdater.ROW_ID)).get()); writer.addRow(w); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java index 779da4f..7c99964 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java @@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RecordIdentifier; @@ -60,7 +60,7 @@ private final ObjectInspector objectInspector; private final long offset; private final long length; - private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private final int columns; private final ReaderKey prevKey = new ReaderKey(); // this is the key less than the lowest key we need to 
process @@ -70,15 +70,15 @@ // an extra value so that we can return it while reading ahead private OrcStruct extraValue; /** - * A RecordIdentifier extended with the current transaction id. This is the - * key of our merge sort with the originalTransaction, bucket, and rowId - * ascending and the currentTransaction, statementId descending. This means that if the + * A RecordIdentifier extended with the current write id. This is the + * key of our merge sort with the originalWriteId, bucket, and rowId + * ascending and the currentWriteId, statementId descending. This means that if the * reader is collapsing events to just the last update, just the first * instance of each record is required. */ @VisibleForTesting public final static class ReaderKey extends RecordIdentifier{ - private long currentTransactionId; + private long currentWriteId; /** * This is the value from delta file name which may be different from value encode in * {@link RecordIdentifier#getBucketProperty()} in case of Update/Delete. @@ -86,54 +86,54 @@ * or delete event. For Acid 2.0 + multi-stmt txn, it must be a delete event. 
* No 2 Insert events from can ever agree on {@link RecordIdentifier} */ - private int statementId;//sort on this descending, like currentTransactionId + private int statementId;//sort on this descending, like currentWriteId ReaderKey() { this(-1, -1, -1, -1, 0); } - ReaderKey(long originalTransaction, int bucket, long rowId, - long currentTransactionId) { - this(originalTransaction, bucket, rowId, currentTransactionId, 0); + ReaderKey(long originalWriteId, int bucket, long rowId, + long currentWriteId) { + this(originalWriteId, bucket, rowId, currentWriteId, 0); } /** * @param statementId - set this to 0 if N/A */ - public ReaderKey(long originalTransaction, int bucket, long rowId, - long currentTransactionId, int statementId) { - super(originalTransaction, bucket, rowId); - this.currentTransactionId = currentTransactionId; + public ReaderKey(long originalWriteId, int bucket, long rowId, + long currentWriteId, int statementId) { + super(originalWriteId, bucket, rowId); + this.currentWriteId = currentWriteId; this.statementId = statementId; } @Override public void set(RecordIdentifier other) { super.set(other); - currentTransactionId = ((ReaderKey) other).currentTransactionId; + currentWriteId = ((ReaderKey) other).currentWriteId; statementId = ((ReaderKey) other).statementId; } - public void setValues(long originalTransactionId, + public void setValues(long originalWriteId, int bucket, long rowId, - long currentTransactionId, + long currentWriteId, int statementId) { - setValues(originalTransactionId, bucket, rowId); - this.currentTransactionId = currentTransactionId; + setValues(originalWriteId, bucket, rowId); + this.currentWriteId = currentWriteId; this.statementId = statementId; } @Override public boolean equals(Object other) { return super.equals(other) && - currentTransactionId == ((ReaderKey) other).currentTransactionId + currentWriteId == ((ReaderKey) other).currentWriteId && statementId == ((ReaderKey) other).statementId//consistent with compareTo() ; } 
@Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + (int)(currentTransactionId ^ (currentTransactionId >>> 32)); + result = 31 * result + (int)(currentWriteId ^ (currentWriteId >>> 32)); result = 31 * result + statementId; return result; } @@ -145,8 +145,8 @@ public int compareTo(RecordIdentifier other) { if (sup == 0) { if (other.getClass() == ReaderKey.class) { ReaderKey oth = (ReaderKey) other; - if (currentTransactionId != oth.currentTransactionId) { - return currentTransactionId < oth.currentTransactionId ? +1 : -1; + if (currentWriteId != oth.currentWriteId) { + return currentWriteId < oth.currentWriteId ? +1 : -1; } if(statementId != oth.statementId) { return statementId < oth.statementId ? +1 : -1; @@ -162,15 +162,15 @@ public int compareTo(RecordIdentifier other) { * This means 1 txn modified the same row more than once */ private boolean isSameRow(ReaderKey other) { - return compareRow(other) == 0 && currentTransactionId == other.currentTransactionId; + return compareRow(other) == 0 && currentWriteId == other.currentWriteId; } - long getCurrentTransactionId() { - return currentTransactionId; + long getCurrentWriteId() { + return currentWriteId; } /** - * Compare rows without considering the currentTransactionId. + * Compare rows without considering the currentWriteId. 
* @param other the value to compare to * @return -1, 0, +1 */ @@ -180,9 +180,9 @@ int compareRow(RecordIdentifier other) { @Override public String toString() { - return "{originalTxn: " + getTransactionId() + ", " + - bucketToString() + ", row: " + getRowId() + ", currentTxn: " + - currentTransactionId + ", statementId: "+ statementId + "}"; + return "{originalWriteId: " + getWriteId() + ", " + + bucketToString() + ", row: " + getRowId() + ", currentWriteId: " + + currentWriteId + ", statementId: "+ statementId + "}"; } } interface ReaderPair { @@ -389,9 +389,9 @@ final boolean nextFromCurrentFile(OrcStruct next) throws IOException { IntWritable operation = new IntWritable(OrcRecordUpdater.INSERT_OPERATION); nextRecord().setFieldValue(OrcRecordUpdater.OPERATION, operation); - nextRecord().setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + nextRecord().setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(transactionId)); - nextRecord().setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + nextRecord().setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(transactionId)); nextRecord().setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(bucketProperty)); @@ -403,11 +403,11 @@ final boolean nextFromCurrentFile(OrcStruct next) throws IOException { nextRecord = next; ((IntWritable) next.getFieldValue(OrcRecordUpdater.OPERATION)) .set(OrcRecordUpdater.INSERT_OPERATION); - ((LongWritable) next.getFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION)) + ((LongWritable) next.getFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID)) .set(transactionId); ((IntWritable) next.getFieldValue(OrcRecordUpdater.BUCKET)) .set(bucketProperty); - ((LongWritable) next.getFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION)) + ((LongWritable) next.getFieldValue(OrcRecordUpdater.CURRENT_WRITEID)) .set(transactionId); ((LongWritable) next.getFieldValue(OrcRecordUpdater.ROW_ID)) .set(nextRowId); @@ -445,7 +445,7 @@ static int encodeBucketId(Configuration conf, int bucketId, int
statementId) { OriginalReaderPairToRead(ReaderKey key, Reader reader, int bucketId, final RecordIdentifier minKey, final RecordIdentifier maxKey, Reader.Options options, Options mergerOptions, Configuration conf, - ValidTxnList validTxnList, int statementId) throws IOException { + ValidWriteIdList validWriteIdList, int statementId) throws IOException { super(key, bucketId, conf, mergerOptions, statementId); this.reader = reader; assert !mergerOptions.isCompacting(); @@ -473,7 +473,7 @@ static int encodeBucketId(Configuration conf, int bucketId, int statementId) { */ //the split is from something other than the 1st file of the logical bucket - compute offset AcidUtils.Directory directoryState = AcidUtils.getAcidState(mergerOptions.getRootPath(), - conf, validTxnList, false, true); + conf, validWriteIdList, false, true); for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) { AcidOutputFormat.Options bucketOptions = AcidUtils.parseBaseOrDeltaBucketFilename(f.getFileStatus().getPath(), conf); @@ -577,7 +577,7 @@ public void next(OrcStruct next) throws IOException { OriginalReaderPairToCompact(ReaderKey key, int bucketId, Reader.Options options, Options mergerOptions, Configuration conf, - ValidTxnList validTxnList, int statementId) throws IOException { + ValidWriteIdList validWriteIdList, int statementId) throws IOException { super(key, bucketId, conf, mergerOptions, statementId); assert mergerOptions.isCompacting() : "Should only be used for Compaction"; this.conf = conf; @@ -588,7 +588,7 @@ public void next(OrcStruct next) throws IOException { assert options.getOffset() == 0; assert options.getMaxOffset() == Long.MAX_VALUE; AcidUtils.Directory directoryState = AcidUtils.getAcidState( - mergerOptions.getRootPath(), conf, validTxnList, false, true); + mergerOptions.getRootPath(), conf, validWriteIdList, false, true); /** * Note that for reading base_x/ or delta_x_x/ with non-acid schema, * {@link Options#getRootPath()} is set to base_x/ or 
delta_x_x/ which causes all it's @@ -714,7 +714,7 @@ private KeyInterval discoverOriginalKeyBounds(Reader reader, int bucket, boolean isTail = true; RecordIdentifier minKey = null; RecordIdentifier maxKey = null; - TransactionMetaData tfp = TransactionMetaData.findTransactionIDForSynthetcRowIDs( + TransactionMetaData tfp = TransactionMetaData.findWriteIDForSynthetcRowIDs( mergerOptions.getBucketPath(), mergerOptions.getRootPath(), conf); int bucketProperty = encodeBucketId(conf, bucket, tfp.statementId); /** @@ -939,13 +939,13 @@ public Options clone() { Reader reader, boolean isOriginal, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Reader.Options options, Path[] deltaDirectory, Options mergerOptions) throws IOException { this.collapse = collapseEvents; this.offset = options.getOffset(); this.length = options.getLength(); - this.validTxnList = validTxnList; + this.validWriteIdList = validWriteIdList; /** * @since Hive 3.0 * With split update (HIVE-14035) we have base/, delta/ and delete_delta/ - the latter only @@ -1028,7 +1028,7 @@ public Options clone() { AcidUtils.parseBase(mergerOptions.getBaseDir()), mergerOptions.getBaseDir()); } pair = new OriginalReaderPairToCompact(key, bucket, options, readerPairOptions, - conf, validTxnList, + conf, validWriteIdList, 0);//0 since base_x doesn't have a suffix (neither does pre acid write) } else { assert mergerOptions.getBucketPath() != null : " since this is not compaction: " @@ -1036,14 +1036,14 @@ public Options clone() { //if here it's a non-acid schema file - check if from before table was marked transactional //or in base_x/delta_x_x from Load Data Options readerPairOptions = mergerOptions; - TransactionMetaData tfp = TransactionMetaData.findTransactionIDForSynthetcRowIDs( + TransactionMetaData tfp = TransactionMetaData.findWriteIDForSynthetcRowIDs( mergerOptions.getBucketPath(), mergerOptions.getRootPath(), conf); - if(tfp.syntheticTransactionId > 0) { + if(tfp.syntheticWriteId > 0) 
{ readerPairOptions = modifyForNonAcidSchemaRead(mergerOptions, - tfp.syntheticTransactionId, tfp.folder); + tfp.syntheticWriteId, tfp.folder); } pair = new OriginalReaderPairToRead(key, reader, bucket, keyInterval.getMinKey(), - keyInterval.getMaxKey(), options, readerPairOptions, conf, validTxnList, tfp.statementId); + keyInterval.getMaxKey(), options, readerPairOptions, conf, validWriteIdList, tfp.statementId); } } else { if(mergerOptions.isCompacting()) { @@ -1101,10 +1101,10 @@ public Options clone() { assert mergerOptions.isCompacting() : "during regular read anything which is not a" + " delete_delta is treated like base: " + delta; Options rawCompactOptions = modifyForNonAcidSchemaRead(mergerOptions, - deltaDir.getMinTransaction(), delta); + deltaDir.getMinWriteId(), delta); //this will also handle copy_N files if any ReaderPair deltaPair = new OriginalReaderPairToCompact(key, bucket, options, - rawCompactOptions, conf, validTxnList, deltaDir.getStatementId()); + rawCompactOptions, conf, validWriteIdList, deltaDir.getStatementId()); if (deltaPair.nextRecord() != null) { readers.put(key, deltaPair); } @@ -1170,24 +1170,24 @@ public Options clone() { * type files into a base_x/ or delta_x_x. The data in these are then assigned ROW_IDs at read * time and made permanent at compaction time. This is identical to how 'original' files (i.e. * those that existed in the table before it was converted to an Acid table) except that the - * transaction ID to use in the ROW_ID should be that of the transaction that ran the Load Data. + * write ID to use in the ROW_ID should be that of the transaction that ran the Load Data. 
*/ static final class TransactionMetaData { - final long syntheticTransactionId; + final long syntheticWriteId; /** * folder which determines the transaction id to use in synthetic ROW_IDs */ final Path folder; final int statementId; - TransactionMetaData(long syntheticTransactionId, Path folder) { - this(syntheticTransactionId, folder, 0); + TransactionMetaData(long syntheticWriteId, Path folder) { + this(syntheticWriteId, folder, 0); } - TransactionMetaData(long syntheticTransactionId, Path folder, int statementId) { - this.syntheticTransactionId = syntheticTransactionId; + TransactionMetaData(long syntheticWriteId, Path folder, int statementId) { + this.syntheticWriteId = syntheticWriteId; this.folder = folder; this.statementId = statementId; } - static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Path rootPath, + static TransactionMetaData findWriteIDForSynthetcRowIDs(Path splitPath, Path rootPath, Configuration conf) throws IOException { Path parent = splitPath.getParent(); if(rootPath.equals(parent)) { @@ -1205,10 +1205,10 @@ static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Pa else { AcidUtils.ParsedDelta pd = AcidUtils.parsedDelta(parent, AcidUtils.DELTA_PREFIX, parent.getFileSystem(conf)); - assert pd.getMinTransaction() == pd.getMaxTransaction() : + assert pd.getMinWriteId() == pd.getMaxWriteId() : "This a delta with raw non acid schema, must be result of single write, no compaction: " + splitPath; - return new TransactionMetaData(pd.getMinTransaction(), parent, pd.getStatementId()); + return new TransactionMetaData(pd.getMinWriteId(), parent, pd.getStatementId()); } } parent = parent.getParent(); @@ -1227,7 +1227,7 @@ static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Pa /** * This is done to read non-acid schema files ("original") located in base_x/ or delta_x_x/ which * happens as a result of Load Data statement. 
Setting {@code rootPath} to base_x/ or delta_x_x - * causes {@link AcidUtils#getAcidState(Path, Configuration, ValidTxnList)} in subsequent + * causes {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} in subsequent * {@link OriginalReaderPair} object to return the files in this dir * in {@link AcidUtils.Directory#getOriginalFiles()} * @return modified clone of {@code baseOptions} @@ -1350,8 +1350,8 @@ public boolean next(RecordIdentifier recordIdentifier, } // if this transaction isn't ok, skip over it - if (!validTxnList.isTxnValid( - ((ReaderKey) recordIdentifier).getCurrentTransactionId())) { + if (!validWriteIdList.isWriteIdValid( + ((ReaderKey) recordIdentifier).getCurrentWriteId())) { continue; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index b90ce6e..2e4db22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -55,7 +55,7 @@ * A RecordUpdater where the files are stored as ORC. * A note on various record structures: the {@code row} coming in (as in {@link #insert(long, Object)} * for example), is a struct like but what is written to the file - * * is > (see {@link #createEventSchema(ObjectInspector)}) + * * is > (see {@link #createEventSchema(ObjectInspector)}) * So there are OIs here to make the translation. 
*/ public class OrcRecordUpdater implements RecordUpdater { @@ -72,10 +72,10 @@ final static int DELETE_OPERATION = 2; //column indexes of corresponding data in storage layer final static int OPERATION = 0; - final static int ORIGINAL_TRANSACTION = 1; + final static int ORIGINAL_WRITEID = 1; final static int BUCKET = 2; final static int ROW_ID = 3; - final static int CURRENT_TRANSACTION = 4; + final static int CURRENT_WRITEID = 4; final static int ROW = 5; /** * total number of fields (above) @@ -100,8 +100,8 @@ private final FSDataOutputStream flushLengths; private final OrcStruct item; private final IntWritable operation = new IntWritable(); - private final LongWritable currentTransaction = new LongWritable(-1); - private final LongWritable originalTransaction = new LongWritable(-1); + private final LongWritable currentWriteId = new LongWritable(-1); + private final LongWritable originalWriteId = new LongWritable(-1); private final IntWritable bucket = new IntWritable(); private final LongWritable rowId = new LongWritable(); private long insertedRows = 0; @@ -112,12 +112,12 @@ private KeyIndexBuilder deleteEventIndexBuilder; private StructField recIdField = null; // field to look for the record identifier in private StructField rowIdField = null; // field inside recId to look for row id in - private StructField originalTxnField = null; // field inside recId to look for original txn in + private StructField originalWriteIdField = null; // field inside recId to look for original write id in private StructField bucketField = null; // field inside recId to look for bucket in private StructObjectInspector rowInspector; // OI for the original row private StructObjectInspector recIdInspector; // OI for the record identifier struct private LongObjectInspector rowIdInspector; // OI for the long row id inside the recordIdentifier - private LongObjectInspector origTxnInspector; // OI for the original txn inside the record + private LongObjectInspector origWriteIdInspector; 
// OI for the original write id inside the record // identifer private IntObjectInspector bucketInspector; @@ -126,11 +126,11 @@ static int getOperation(OrcStruct struct) { } static long getCurrentTransaction(OrcStruct struct) { - return ((LongWritable) struct.getFieldValue(CURRENT_TRANSACTION)).get(); + return ((LongWritable) struct.getFieldValue(CURRENT_WRITEID)).get(); } static long getOriginalTransaction(OrcStruct struct) { - return ((LongWritable) struct.getFieldValue(ORIGINAL_TRANSACTION)).get(); + return ((LongWritable) struct.getFieldValue(ORIGINAL_WRITEID)).get(); } static int getBucket(OrcStruct struct) { @@ -184,15 +184,13 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { fields.add(new OrcStruct.Field("operation", PrimitiveObjectInspectorFactory.writableIntObjectInspector, OPERATION)); fields.add(new OrcStruct.Field("originalTransaction", - PrimitiveObjectInspectorFactory.writableLongObjectInspector, - ORIGINAL_TRANSACTION)); + PrimitiveObjectInspectorFactory.writableLongObjectInspector, ORIGINAL_WRITEID)); fields.add(new OrcStruct.Field("bucket", PrimitiveObjectInspectorFactory.writableIntObjectInspector, BUCKET)); fields.add(new OrcStruct.Field("rowId", PrimitiveObjectInspectorFactory.writableLongObjectInspector, ROW_ID)); fields.add(new OrcStruct.Field("currentTransaction", - PrimitiveObjectInspectorFactory.writableLongObjectInspector, - CURRENT_TRANSACTION)); + PrimitiveObjectInspectorFactory.writableLongObjectInspector, CURRENT_WRITEID)); fields.add(new OrcStruct.Field("row", rowInspector, ROW)); return new OrcStruct.OrcStructInspector(fields); } @@ -246,7 +244,7 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { } } } - if (options.getMinimumTransactionId() != options.getMaximumTransactionId() + if (options.getMinimumWriteId() != options.getMaximumWriteId() && !options.isWritingBase()){ //throw if file already exists as that should never happen flushLengths = 
fs.create(OrcAcidUtils.getSideFile(this.path), false, 8, @@ -316,8 +314,8 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { options.getRecordIdColumn()))); item = new OrcStruct(FIELDS); item.setFieldValue(OPERATION, operation); - item.setFieldValue(CURRENT_TRANSACTION, currentTransaction); - item.setFieldValue(ORIGINAL_TRANSACTION, originalTransaction); + item.setFieldValue(CURRENT_WRITEID, currentWriteId); + item.setFieldValue(ORIGINAL_WRITEID, originalWriteId); item.setFieldValue(BUCKET, bucket); item.setFieldValue(ROW_ID, rowId); } @@ -342,9 +340,9 @@ private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) { List fields = ((StructObjectInspector) recIdField.getFieldObjectInspector()).getAllStructFieldRefs(); // Go by position, not field name, as field names aren't guaranteed. The order of fields - // in RecordIdentifier is transactionId, bucketId, rowId - originalTxnField = fields.get(0); - origTxnInspector = (LongObjectInspector)originalTxnField.getFieldObjectInspector(); + // in RecordIdentifier is writeId, bucketId, rowId + originalWriteIdField = fields.get(0); + origWriteIdInspector = (LongObjectInspector)originalWriteIdField.getFieldObjectInspector(); bucketField = fields.get(1); bucketInspector = (IntObjectInspector) bucketField.getFieldObjectInspector(); rowIdField = fields.get(2); @@ -361,27 +359,27 @@ private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) { * thus even for unbucketed tables, the N in bucket_N file name matches writerId/bucketId even for * late split */ - private void addSimpleEvent(int operation, long currentTransaction, long rowId, Object row) + private void addSimpleEvent(int operation, long currentWriteId, long rowId, Object row) throws IOException { this.operation.set(operation); - this.currentTransaction.set(currentTransaction); + this.currentWriteId.set(currentWriteId); Integer currentBucket = null; - // If this is an insert, originalTransaction should be 
set to this transaction. If not, + // If this is an insert, originalWriteId should be set to this transaction. If not, // it will be reset by the following if anyway. - long originalTransaction = currentTransaction; + long originalWriteId = currentWriteId; if (operation == DELETE_OPERATION || operation == UPDATE_OPERATION) { Object rowIdValue = rowInspector.getStructFieldData(row, recIdField); - originalTransaction = origTxnInspector.get( - recIdInspector.getStructFieldData(rowIdValue, originalTxnField)); + originalWriteId = origWriteIdInspector.get( + recIdInspector.getStructFieldData(rowIdValue, originalWriteIdField)); rowId = rowIdInspector.get(recIdInspector.getStructFieldData(rowIdValue, rowIdField)); currentBucket = setBucket(bucketInspector.get( recIdInspector.getStructFieldData(rowIdValue, bucketField)), operation); } this.rowId.set(rowId); - this.originalTransaction.set(originalTransaction); + this.originalWriteId.set(originalWriteId); item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation)); item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row)); - indexBuilder.addKey(operation, originalTransaction, bucket.get(), rowId); + indexBuilder.addKey(operation, originalWriteId, bucket.get(), rowId); if (writer == null) { writer = OrcFile.createWriter(path, writerOptions); } @@ -389,18 +387,18 @@ private void addSimpleEvent(int operation, long currentTransaction, long rowId, restoreBucket(currentBucket, operation); } - private void addSplitUpdateEvent(int operation, long currentTransaction, long rowId, Object row) + private void addSplitUpdateEvent(int operation, long currentWriteId, long rowId, Object row) throws IOException { if (operation == INSERT_OPERATION) { // Just insert the record in the usual way, i.e., default to the simple behavior. 
- addSimpleEvent(operation, currentTransaction, rowId, row); + addSimpleEvent(operation, currentWriteId, rowId, row); return; } this.operation.set(operation); - this.currentTransaction.set(currentTransaction); + this.currentWriteId.set(currentWriteId); Object rowValue = rowInspector.getStructFieldData(row, recIdField); - long originalTransaction = origTxnInspector.get( - recIdInspector.getStructFieldData(rowValue, originalTxnField)); + long originalWriteId = origWriteIdInspector.get( + recIdInspector.getStructFieldData(rowValue, originalWriteIdField)); rowId = rowIdInspector.get( recIdInspector.getStructFieldData(rowValue, rowIdField)); Integer currentBucket = null; @@ -423,54 +421,54 @@ private void addSplitUpdateEvent(int operation, long currentTransaction, long ro // A delete/update generates a delete event for the original row. this.rowId.set(rowId); - this.originalTransaction.set(originalTransaction); + this.originalWriteId.set(originalWriteId); item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(DELETE_OPERATION)); item.setFieldValue(OrcRecordUpdater.ROW, null); // ROW is null for delete events. - deleteEventIndexBuilder.addKey(DELETE_OPERATION, originalTransaction, bucket.get(), rowId); + deleteEventIndexBuilder.addKey(DELETE_OPERATION, originalWriteId, bucket.get(), rowId); deleteEventWriter.addRow(item); restoreBucket(currentBucket, operation); } if (operation == UPDATE_OPERATION) { // A new row is also inserted in the usual delta file for an update event. 
- addSimpleEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSimpleEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } } @Override - public void insert(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void insert(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSplitUpdateEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } else { - addSimpleEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSimpleEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } rowCountDelta++; } @Override - public void update(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void update(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(UPDATE_OPERATION, currentTransaction, -1L, row); + addSplitUpdateEvent(UPDATE_OPERATION, currentWriteId, -1L, row); } else { - addSimpleEvent(UPDATE_OPERATION, currentTransaction, -1L, row); + addSimpleEvent(UPDATE_OPERATION, currentWriteId, -1L, row); } } @Override - public void delete(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void delete(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(DELETE_OPERATION, currentTransaction, -1L, row); + addSplitUpdateEvent(DELETE_OPERATION, currentWriteId, -1L, row); } else { - addSimpleEvent(DELETE_OPERATION, 
currentTransaction, -1L, row); + addSimpleEvent(DELETE_OPERATION, currentWriteId, -1L, row); } rowCountDelta--; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java index da20004..cbd1fa9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java @@ -27,8 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -72,7 +72,7 @@ protected float progress = 0.0f; protected Object[] partitionValues; private boolean addPartitionCols = true; - private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private final DeleteEventRegistry deleteEventRegistry; /** * {@link RecordIdentifier}/{@link VirtualColumn#ROWID} information @@ -183,8 +183,8 @@ private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporte partitionValues = null; } - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); // Clone readerOptions for deleteEvents. 
Reader.Options deleteEventReaderOptions = readerOptions.clone(); @@ -214,7 +214,7 @@ private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporte } rowIdProjected = areRowIdsProjected(rbCtx); rootPath = orcSplit.getRootDir(); - syntheticProps = computeOffsetAndBucket(orcSplit, conf, validTxnList); + syntheticProps = computeOffsetAndBucket(orcSplit, conf, validWriteIdList); } /** @@ -242,7 +242,7 @@ private OffsetAndBucketProperty(long rowIdOffset, int bucketProperty, long synth * before/during split computation and passing the info in the split. (HIVE-17917) */ private OffsetAndBucketProperty computeOffsetAndBucket( - OrcSplit split, JobConf conf,ValidTxnList validTxnList) throws IOException { + OrcSplit split, JobConf conf,ValidWriteIdList validWriteIdList) throws IOException { if(!needSyntheticRowIds(split.isOriginal(), !deleteEventRegistry.isEmpty(), rowIdProjected)) { if(split.isOriginal()) { /** @@ -252,22 +252,22 @@ private OffsetAndBucketProperty computeOffsetAndBucket( * filter out base/delta files but this makes fewer dependencies) */ OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = - OrcRawRecordMerger.TransactionMetaData.findTransactionIDForSynthetcRowIDs(split.getPath(), + OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(split.getPath(), split.getRootDir(), conf); return new OffsetAndBucketProperty(-1,-1, - syntheticTxnInfo.syntheticTransactionId); + syntheticTxnInfo.syntheticWriteId); } return null; } long rowIdOffset = 0; OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = - OrcRawRecordMerger.TransactionMetaData.findTransactionIDForSynthetcRowIDs(split.getPath(), + OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(split.getPath(), split.getRootDir(), conf); int bucketId = AcidUtils.parseBaseOrDeltaBucketFilename(split.getPath(), conf).getBucketId(); int bucketProperty = BucketCodec.V1.encode(new AcidOutputFormat.Options(conf) 
.statementId(syntheticTxnInfo.statementId).bucket(bucketId)); AcidUtils.Directory directoryState = AcidUtils.getAcidState( syntheticTxnInfo.folder, conf, - validTxnList, false, true); + validWriteIdList, false, true); for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) { AcidOutputFormat.Options bucketOptions = AcidUtils.parseBaseOrDeltaBucketFilename(f.getFileStatus().getPath(), conf); @@ -283,7 +283,7 @@ private OffsetAndBucketProperty computeOffsetAndBucket( rowIdOffset += reader.getNumberOfRows(); } return new OffsetAndBucketProperty(rowIdOffset, bucketProperty, - syntheticTxnInfo.syntheticTransactionId); + syntheticTxnInfo.syntheticWriteId); } /** * {@link VectorizedOrcAcidRowBatchReader} is always used for vectorized reads of acid tables. @@ -426,7 +426,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti " to handle original files that require ROW__IDs: " + rootPath); } /** - * {@link RecordIdentifier#getTransactionId()} + * {@link RecordIdentifier#getWriteId()} */ recordIdColumnVector.fields[0].noNulls = true; recordIdColumnVector.fields[0].isRepeating = true; @@ -450,11 +450,11 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti } //Now populate a structure to use to apply delete events innerRecordIdColumnVector = new ColumnVector[OrcRecordUpdater.FIELDS]; - innerRecordIdColumnVector[OrcRecordUpdater.ORIGINAL_TRANSACTION] = recordIdColumnVector.fields[0]; + innerRecordIdColumnVector[OrcRecordUpdater.ORIGINAL_WRITEID] = recordIdColumnVector.fields[0]; innerRecordIdColumnVector[OrcRecordUpdater.BUCKET] = recordIdColumnVector.fields[1]; innerRecordIdColumnVector[OrcRecordUpdater.ROW_ID] = recordIdColumnVector.fields[2]; //these are insert events so (original txn == current) txn for all rows - innerRecordIdColumnVector[OrcRecordUpdater.CURRENT_TRANSACTION] = recordIdColumnVector.fields[0]; + innerRecordIdColumnVector[OrcRecordUpdater.CURRENT_WRITEID] = 
recordIdColumnVector.fields[0]; } if(syntheticProps.syntheticTxnId > 0) { //"originals" (written before table was converted to acid) is considered written by @@ -470,7 +470,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti * reader (transactions) is concerned. Since here we are reading 'original' schema file, * all rows in it have been created by the same txn, namely 'syntheticProps.syntheticTxnId' */ - if (!validTxnList.isTxnValid(syntheticProps.syntheticTxnId)) { + if (!validWriteIdList.isWriteIdValid(syntheticProps.syntheticTxnId)) { selectedBitSet.clear(0, vectorizedRowBatchBase.size); } } @@ -514,7 +514,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti // Transfer columnVector objects from base batch to outgoing batch. System.arraycopy(payloadStruct.fields, 0, value.cols, 0, value.getDataColumnCount()); if(rowIdProjected) { - recordIdColumnVector.fields[0] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]; + recordIdColumnVector.fields[0] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ORIGINAL_WRITEID]; recordIdColumnVector.fields[1] = vectorizedRowBatchBase.cols[OrcRecordUpdater.BUCKET]; recordIdColumnVector.fields[2] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ROW_ID]; } @@ -531,24 +531,24 @@ private void findRecordsWithInvalidTransactionIds(VectorizedRowBatch batch, BitS } private void findRecordsWithInvalidTransactionIds(ColumnVector[] cols, int size, BitSet selectedBitSet) { - if (cols[OrcRecordUpdater.CURRENT_TRANSACTION].isRepeating) { + if (cols[OrcRecordUpdater.CURRENT_WRITEID].isRepeating) { // When we have repeating values, we can unset the whole bitset at once // if the repeating value is not a valid transaction. 
long currentTransactionIdForBatch = ((LongColumnVector) - cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector[0]; - if (!validTxnList.isTxnValid(currentTransactionIdForBatch)) { + cols[OrcRecordUpdater.CURRENT_WRITEID]).vector[0]; + if (!validWriteIdList.isWriteIdValid(currentTransactionIdForBatch)) { selectedBitSet.clear(0, size); } return; } long[] currentTransactionVector = - ((LongColumnVector) cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector; + ((LongColumnVector) cols[OrcRecordUpdater.CURRENT_WRITEID]).vector; // Loop through the bits that are set to true and mark those rows as false, if their // current transactions are not valid. for (int setBitIndex = selectedBitSet.nextSetBit(0); setBitIndex >= 0; setBitIndex = selectedBitSet.nextSetBit(setBitIndex+1)) { - if (!validTxnList.isTxnValid(currentTransactionVector[setBitIndex])) { + if (!validWriteIdList.isWriteIdValid(currentTransactionVector[setBitIndex])) { selectedBitSet.clear(setBitIndex); } } @@ -630,19 +630,19 @@ DeleteEventRegistry getDeleteEventRegistry() { private OrcRawRecordMerger.ReaderKey deleteRecordKey; private OrcStruct deleteRecordValue; private Boolean isDeleteRecordAvailable = null; - private ValidTxnList validTxnList; + private ValidWriteIdList validWriteIdList; SortMergedDeleteEventRegistry(JobConf conf, OrcSplit orcSplit, Reader.Options readerOptions) throws IOException { final Path[] deleteDeltas = getDeleteDeltaDirsFromSplit(orcSplit); if (deleteDeltas.length > 0) { int bucket = AcidUtils.parseBaseOrDeltaBucketFilename(orcSplit.getPath(), conf).getBucketId(); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? 
new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); OrcRawRecordMerger.Options mergerOptions = new OrcRawRecordMerger.Options().isDeleteReader(true); assert !orcSplit.isOriginal() : "If this now supports Original splits, set up mergeOptions properly"; this.deleteRecords = new OrcRawRecordMerger(conf, true, null, false, bucket, - validTxnList, readerOptions, deleteDeltas, + validWriteIdList, readerOptions, deleteDeltas, mergerOptions); this.deleteRecordKey = new OrcRawRecordMerger.ReaderKey(); this.deleteRecordValue = this.deleteRecords.createValue(); @@ -671,8 +671,8 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit } long[] originalTransaction = - cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? null - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector; + cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? null + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector; long[] bucket = cols[OrcRecordUpdater.BUCKET].isRepeating ? null : ((LongColumnVector) cols[OrcRecordUpdater.BUCKET]).vector; @@ -682,7 +682,7 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit // The following repeatedX values will be set, if any of the columns are repeating. long repeatedOriginalTransaction = (originalTransaction != null) ? -1 - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[0]; + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[0]; long repeatedBucket = (bucket != null) ? -1 : ((LongColumnVector) cols[OrcRecordUpdater.BUCKET]).vector[0]; long repeatedRowId = (rowId != null) ? -1 @@ -828,12 +828,12 @@ public String toString() { private final RecordReader recordReader; private int indexPtrInBatch; private final int bucketForSplit; // The bucket value should be same for all the records. 
- private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private boolean isBucketPropertyRepeating; private final boolean isBucketedTable; DeleteReaderValue(Reader deleteDeltaReader, Reader.Options readerOptions, int bucket, - ValidTxnList validTxnList, boolean isBucketedTable) throws IOException { + ValidWriteIdList validWriteIdList, boolean isBucketedTable) throws IOException { this.recordReader = deleteDeltaReader.rowsOptions(readerOptions); this.bucketForSplit = bucket; this.batch = deleteDeltaReader.getSchema().createRowBatch(); @@ -841,7 +841,7 @@ public String toString() { this.batch = null; // Oh! the first batch itself was null. Close the reader. } this.indexPtrInBatch = 0; - this.validTxnList = validTxnList; + this.validWriteIdList = validWriteIdList; this.isBucketedTable = isBucketedTable; checkBucketId();//check 1st batch } @@ -866,7 +866,7 @@ public boolean next(DeleteRecordKey deleteRecordKey) throws IOException { checkBucketId(deleteRecordKey.bucketProperty); } ++indexPtrInBatch; - if (validTxnList.isTxnValid(currentTransaction)) { + if (validWriteIdList.isWriteIdValid(currentTransaction)) { isValidNext = true; } } @@ -878,17 +878,17 @@ public void close() throws IOException { } private long setCurrentDeleteKey(DeleteRecordKey deleteRecordKey) { int originalTransactionIndex = - batch.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? 0 : indexPtrInBatch; + batch.cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? 0 : indexPtrInBatch; long originalTransaction = - ((LongColumnVector) batch.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[originalTransactionIndex]; + ((LongColumnVector) batch.cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[originalTransactionIndex]; int bucketPropertyIndex = batch.cols[OrcRecordUpdater.BUCKET].isRepeating ? 
0 : indexPtrInBatch; int bucketProperty = (int)((LongColumnVector)batch.cols[OrcRecordUpdater.BUCKET]).vector[bucketPropertyIndex]; long rowId = ((LongColumnVector) batch.cols[OrcRecordUpdater.ROW_ID]).vector[indexPtrInBatch]; int currentTransactionIndex = - batch.cols[OrcRecordUpdater.CURRENT_TRANSACTION].isRepeating ? 0 : indexPtrInBatch; + batch.cols[OrcRecordUpdater.CURRENT_WRITEID].isRepeating ? 0 : indexPtrInBatch; long currentTransaction = - ((LongColumnVector) batch.cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector[currentTransactionIndex]; + ((LongColumnVector) batch.cols[OrcRecordUpdater.CURRENT_WRITEID]).vector[currentTransactionIndex]; deleteRecordKey.set(originalTransaction, bucketProperty, rowId); return currentTransaction; } @@ -976,14 +976,14 @@ public int compareTo(CompressedOtid other) { private TreeMap sortMerger; private long rowIds[]; private CompressedOtid compressedOtids[]; - private ValidTxnList validTxnList; + private ValidWriteIdList validWriteIdList; private Boolean isEmpty = null; ColumnizedDeleteEventRegistry(JobConf conf, OrcSplit orcSplit, Reader.Options readerOptions) throws IOException, DeleteEventsOverflowMemoryException { int bucket = AcidUtils.parseBaseOrDeltaBucketFilename(orcSplit.getPath(), conf).getBucketId(); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? 
new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); this.sortMerger = new TreeMap(); this.rowIds = null; this.compressedOtids = null; @@ -1025,7 +1025,7 @@ public int compareTo(CompressedOtid other) { throw new DeleteEventsOverflowMemoryException(); } DeleteReaderValue deleteReaderValue = new DeleteReaderValue(deleteDeltaReader, - readerOptions, bucket, validTxnList, isBucketedTable); + readerOptions, bucket, validWriteIdList, isBucketedTable); DeleteRecordKey deleteRecordKey = new DeleteRecordKey(); if (deleteReaderValue.next(deleteRecordKey)) { sortMerger.put(deleteRecordKey, deleteReaderValue); @@ -1165,10 +1165,10 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit // check if it is deleted or not. long[] originalTransactionVector = - cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? null - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector; + cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? null + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector; long repeatedOriginalTransaction = (originalTransactionVector != null) ? -1 - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[0]; + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[0]; long[] bucketProperties = cols[OrcRecordUpdater.BUCKET].isRepeating ? 
null diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 3968b0e..c9fa897 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -32,6 +32,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.LockRequestBuilder; @@ -50,6 +51,7 @@ Licensed to the Apache Software Foundation (ASF) under one import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -84,6 +86,12 @@ Licensed to the Apache Software Foundation (ASF) under one * transaction id. Thus is 1 is first transaction id. */ private volatile long txnId = 0; + + /** + * The local cache of table write IDs associated with current transaction + */ + private HashMap tableWriteIds = new HashMap<>(); + /** * assigns a unique monotonically increasing ID to each statement * which is part of an open transaction. 
This is used by storage @@ -91,7 +99,7 @@ Licensed to the Apache Software Foundation (ASF) under one * to keep apart multiple writes of the same data within the same transaction * Also see {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options} */ - private int writeId = -1; + private int stmtId = -1; /** * counts number of statements in the current transaction */ @@ -208,8 +216,9 @@ long openTxn(Context ctx, String user, long delay) throws LockException { } try { txnId = getMS().openTxn(user); - writeId = 0; + stmtId = 0; numStatements = 0; + tableWriteIds.clear(); isExplicitTransaction = false; startTransactionCount = 0; LOG.debug("Opened " + JavaUtils.txnIdToString(txnId)); @@ -241,7 +250,8 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo catch(LockException e) { if(e.getCause() instanceof TxnAbortedException) { txnId = 0; - writeId = -1; + stmtId = -1; + tableWriteIds.clear(); } throw e; } @@ -597,8 +607,9 @@ public void commitTxn() throws LockException { e); } finally { txnId = 0; - writeId = -1; + stmtId = -1; numStatements = 0; + tableWriteIds.clear(); } } @@ -622,8 +633,9 @@ public void rollbackTxn() throws LockException { e); } finally { txnId = 0; - writeId = -1; + stmtId = -1; numStatements = 0; + tableWriteIds.clear(); } } @@ -743,12 +755,24 @@ private void stopHeartbeat() throws LockException { @Override public ValidTxnList getValidTxns() throws LockException { + assert isTxnOpen(); init(); try { return getMS().getValidTxns(txnId); } catch (TException e) { - throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), - e); + throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); + } + } + + @Override + public ValidTxnWriteIdList getValidWriteIds(List tableList, + String validTxnString) throws LockException { + assert isTxnOpen(); + assert validTxnString != null && !validTxnString.isEmpty(); + try { + return getMS().getValidWriteIds(tableList, validTxnString); + } catch 
(TException e) { + throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); } } @@ -886,9 +910,28 @@ public long getCurrentTxnId() { return txnId; } @Override - public int getWriteIdAndIncrement() { + public int getStmtIdAndIncrement() { assert isTxnOpen(); - return writeId++; + return stmtId++; + } + + @Override + public long getTableWriteId(String dbName, String tableName) throws LockException { + String fullTableName = AcidUtils.getFullTableName(dbName, tableName); + if (tableWriteIds.containsKey(fullTableName)) { + return tableWriteIds.get(fullTableName); + } + + try { + long writeId = 0; // If not called within a txn, then just return default Id of 0 + if (isTxnOpen()) { + writeId = getMS().allocateTableWriteId(txnId, dbName, tableName); + tableWriteIds.put(fullTableName, writeId); + } + return writeId; + } catch (TException e) { + throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); + } } private static long getHeartbeatInterval(Configuration conf) throws LockException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index cf8bc7f..3e1afe5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.lockmgr; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.ValidTxnList; @@ -60,12 +61,15 @@ public boolean isTxnOpen() { public long getCurrentTxnId() { return 0L; } - @Override - public int getWriteIdAndIncrement() { + public int getStmtIdAndIncrement() { return 0; } @Override + public long getTableWriteId(String dbName, String tableName) throws LockException { + return 0L; + } + @Override public HiveLockManager getLockManager() throws LockException { if (lockMgr == null) 
{ boolean supportConcurrency = @@ -214,6 +218,12 @@ public ValidTxnList getValidTxns() throws LockException { } @Override + public ValidTxnWriteIdList getValidWriteIds(List tableList, + String validTxnString) throws LockException { + return new ValidTxnWriteIdList(); + } + + @Override public String getTxnManagerName() { return DummyTxnManager.class.getName(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 4f9f0c2..ca7d47c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.lockmgr; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.QueryPlan; @@ -133,6 +134,20 @@ ValidTxnList getValidTxns() throws LockException; /** + * Get the table write Ids that are valid for the current transaction. The resulting + * {@link ValidTxnWriteIdList} object is a thrift object and can + * be passed to the processing + * tasks for use in reading the data. This call should be made once up + * front by the planner per table and should never be called on the backend, + * as this will violate the isolation level semantics. + * @param tableList list of tables (.) read/written by current transaction. + * @param validTxnString snapshot of valid txns for the current txn + * @return list of valid table write Ids. + * @throws LockException + */ + ValidTxnWriteIdList getValidWriteIds(List tableList, String validTxnString) throws LockException; + + /** * Get the name for currently installed transaction manager. 
* @return transaction manager name */ @@ -222,9 +237,14 @@ long getCurrentTxnId(); /** + * if {@code isTxnOpen()}, returns the table write ID associated with current active transaction + */ + long getTableWriteId(String dbName, String tableName) throws LockException; + + /** * Should be though of more as a unique write operation ID in a given txn (at QueryPlan level). * Each statement writing data within a multi statement txn should have a unique WriteId. * Even a single statement, (e.g. Merge, multi-insert may generates several writes). */ - int getWriteIdAndIncrement(); + int getStmtIdAndIncrement(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 07999e2..cc50fbd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1970,7 +1970,7 @@ else if(!isAcidIUDoperation && isFullAcidTable) { * delta_x_x directory - same as any other Acid write. This method modifies the destPath to add * this path component. 
* @param txnId - id of current transaction (in which this operation is running) - * @param stmtId - see {@link DbTxnManager#getWriteIdAndIncrement()} + * @param stmtId - see {@link DbTxnManager#getStmtIdAndIncrement()} * @return appropriately modified path */ private Path fixFullAcidPathForLoadData(LoadFileType loadFileType, Path destPath, long txnId, int stmtId, Table tbl) throws HiveException { @@ -2234,13 +2234,13 @@ private void constructOneLBLocationMap(FileStatus fSta, * @param loadFileType * @param numDP number of dynamic partitions * @param isAcid true if this is an ACID operation - * @param txnId txnId, can be 0 unless isAcid == true + * @param writeId writeId, can be 0 unless isAcid == true * @return partition map details (PartitionSpec and Partition) * @throws HiveException */ public Map, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map partSpec, final LoadFileType loadFileType, - final int numDP, final int numLB, final boolean isAcid, final long txnId, final int stmtId, + final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException { @@ -2256,7 +2256,7 @@ private void constructOneLBLocationMap(FileStatus fSta, // Get all valid partition paths and existing partitions for them (if any) final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, txnId, stmtId, + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); final int partsToLoad = validPartitions.size(); @@ -2291,7 +2291,7 @@ public Void call() throws Exception { // load the partition Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, loadFileType, true, numLB > 0, - false, isAcid, hasFollowingStatsTask, txnId, stmtId); + 
false, isAcid, hasFollowingStatsTask, writeId, stmtId); partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { @@ -2350,8 +2350,9 @@ public Void call() throws Exception { for (Partition p : partitionsMap.values()) { partNames.add(p.getName()); } - getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), - partNames, AcidUtils.toDataOperationType(operation)); + getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), + tbl.getDbName(), tbl.getTableName(), partNames, + AcidUtils.toDataOperationType(operation)); } LOG.info("Loaded " + partitionsMap.size() + " partitions"); return partitionsMap; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 69447d9..2cca0fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1270,7 +1270,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, FileSinkDesc fsInputDesc = fsInput.getConf(); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("Creating merge work from " + System.identityHashCode(fsInput) - + " with write ID " + (fsInputDesc.isMmTable() ? fsInputDesc.getTransactionId() : null) + + " with write ID " + (fsInputDesc.isMmTable() ? fsInputDesc.getTableWriteId() : null) + " into " + finalName); } @@ -1280,7 +1280,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)); RowSchema inputRS = fsInput.getSchema(); - Long srcMmWriteId = fsInputDesc.isMmTable() ? fsInputDesc.getTransactionId() : null; + Long srcMmWriteId = fsInputDesc.isMmTable() ? 
fsInputDesc.getTableWriteId() : null; FileSinkDesc fsOutputDesc = null; TableScanOperator tsMerge = null; if (!isBlockMerge) { @@ -1675,7 +1675,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, fmd = new OrcFileMergeDesc(); } fmd.setIsMmTable(fsInputDesc.isMmTable()); - fmd.setTxnId(fsInputDesc.getTransactionId()); + fmd.setWriteId(fsInputDesc.getTableWriteId()); int stmtId = fsInputDesc.getStatementId(); fmd.setStmtId(stmtId == -1 ? 0 : stmtId); fmd.setDpCtx(fsInputDesc.getDynPartCtx()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 3eb869d..46ce9a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1592,7 +1592,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); truncateTblDesc.setOutputDir(queryTmpdir); LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<>() : partSpec, null); + partSpec == null ? new HashMap<>() : partSpec); ltd.setLbCtx(lbCtx); @SuppressWarnings("unchecked") Task moveTsk = @@ -2256,7 +2256,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, mergeDesc.setOutputDir(queryTmpdir); // No need to handle MM tables - unsupported path. LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<>() : partSpec, null); + partSpec == null ? 
new HashMap<>() : partSpec); ltd.setLbCtx(lbCtx); Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 54f5bab..ca06faa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -276,8 +277,13 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Long txnId = null; int stmtId = -1; if (AcidUtils.isTransactionalTable(ts.tableHandle)) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - stmtId = SessionState.get().getTxnMgr().getWriteIdAndIncrement(); + try { + txnId = SessionState.get().getTxnMgr().getTableWriteId(ts.tableHandle.getDbName(), + ts.tableHandle.getTableName()); + } catch (LockException ex) { + throw new SemanticException("Failed to allocate the write id", ex); + } + stmtId = SessionState.get().getTxnMgr().getStmtIdAndIncrement(); } LoadTableDesc loadTableWork; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b67a03f..c1f05d7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -122,6 +122,8 @@ import org.apache.hadoop.hive.ql.lib.GraphWalker; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +import 
org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.DummyPartition; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -6653,6 +6655,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) Map partSpec = null; boolean isMmTable = false, isMmCtas = false; Long txnId = null; + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6732,11 +6735,15 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM checkAcidConstraints(qb, table_desc, dest_tab); } - if (isMmTable) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - } else { - txnId = acidOp == Operation.NOT_ACID ? null : - SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + if (isMmTable) { + txnId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } else { + txnId = acidOp == Operation.NOT_ACID ? null : + txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); } boolean isReplace = !qb.getParseInfo().isInsertIntoTable( dest_tab.getDbName(), dest_tab.getTableName()); @@ -6807,11 +6814,15 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM? checkAcidConstraints(qb, table_desc, dest_tab); } - if (isMmTable) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - } else { - txnId = (acidOp == Operation.NOT_ACID) ? 
null : - SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + if (isMmTable) { + txnId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } else { + txnId = (acidOp == Operation.NOT_ACID) ? null : + txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); } ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, txnId); // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old @@ -6851,7 +6862,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) destTableIsMaterialization = tblDesc.isMaterialization(); if (AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) { isMmTable = isMmCtas = true; - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + txnId = txnMgr.getTableWriteId(tblDesc.getDatabaseName(), tblDesc.getTableName()); + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); + } tblDesc.setInitialMmWriteId(txnId); } } else if (viewDesc != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java index 4da868c..6bd0053 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java @@ -28,7 +28,7 @@ private int listBucketingDepth; private boolean hasDynamicPartitions; private boolean isListBucketingAlterTableConcatenate; - private Long txnId; + private Long writeId; private int stmtId; private boolean isMmTable; @@ -77,12 +77,12 @@ public void setListBucketingAlterTableConcatenate(boolean isListBucketingAlterTa this.isListBucketingAlterTableConcatenate = isListBucketingAlterTableConcatenate; } - public Long getTxnId() { - return txnId; + public Long getWriteId() { + return writeId; } - public void setTxnId(Long txnId) 
{ - this.txnId = txnId; + public void setWriteId(Long writeId) { + this.writeId = writeId; } public int getStmtId() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 92b8031..ce61fc5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -90,7 +90,7 @@ // Record what type of write this is. Default is non-ACID (ie old style). private AcidUtils.Operation writeType = AcidUtils.Operation.NOT_ACID; - private long txnId = 0; // transaction id for this operation + private long tableWriteId = 0; // table write id for this operation private int statementId = -1; private transient Table table; @@ -167,7 +167,7 @@ public Object clone() throws CloneNotSupportedException { ret.setStatsReliable(statsReliable); ret.setDpSortState(dpSortState); ret.setWriteType(writeType); - ret.setTransactionId(txnId); + ret.setTableWriteId(tableWriteId); ret.setStatementId(statementId); ret.setStatsTmpDir(statsTmpDir); ret.setIsMerge(isMerge); @@ -207,7 +207,7 @@ public Path getFinalDirName() { public Path getMergeInputDirName() { Path root = getFinalDirName(); if (isMmTable()) { - return new Path(root, AcidUtils.deltaSubdir(txnId, txnId, statementId)); + return new Path(root, AcidUtils.deltaSubdir(tableWriteId, tableWriteId, statementId)); } else { return root; } @@ -483,11 +483,11 @@ public void setWriteType(AcidUtils.Operation type) { public String getWriteTypeString() { return getWriteType() == AcidUtils.Operation.NOT_ACID ? 
null : getWriteType().toString(); } - public void setTransactionId(long id) { - txnId = id; + public void setTableWriteId(long id) { + tableWriteId = id; } - public long getTransactionId() { - return txnId; + public long getTableWriteId() { + return tableWriteId; } public void setStatementId(int id) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index a40c486..5cddc9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -120,9 +120,9 @@ public LoadTableDesc(final Path sourcePath, */ public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, - final Map partitionSpec, Long txnId) { + final Map partitionSpec) { this(sourcePath, table, partitionSpec, LoadFileType.REPLACE_ALL, - AcidUtils.Operation.NOT_ACID, txnId); + AcidUtils.Operation.NOT_ACID, null); } public LoadTableDesc(final Path sourcePath, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 661446d..61495e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -102,6 +102,9 @@ // input file name (big) to bucket number private Map bucketFileNameMapping; + private String dbName = null; + private String tableName = null; + private boolean isMetadataOnly = false; private boolean isAcidTable; @@ -135,6 +138,10 @@ public TableScanDesc(final String alias, List vcs, Table tblMetad this.alias = alias; this.virtualCols = vcs; this.tableMetadata = tblMetadata; + if (tblMetadata != null) { + dbName = tblMetadata.getDbName(); + tableName = tblMetadata.getTableName(); + } isAcidTable = AcidUtils.isAcidTable(this.tableMetadata); if (isAcidTable) { acidOperationalProperties = 
AcidUtils.getAcidOperationalProperties(this.tableMetadata); @@ -154,12 +161,12 @@ public String getAlias() { @Explain(displayName = "table", jsonOnly = true) public String getTableName() { - return this.tableMetadata.getTableName(); + return this.tableName; } @Explain(displayName = "database", jsonOnly = true) public String getDatabaseName() { - return this.tableMetadata.getDbName(); + return this.dbName; } @Explain(displayName = "columns", jsonOnly = true) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 02097c8..df9fa00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -23,8 +23,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; @@ -243,10 +243,10 @@ private void clean(CompactionInfo ci) throws MetaException { /** * Each Compaction only compacts as far as the highest txn id such that all txns below it - * are resolved (i.e. not opened). This is what "highestTxnId" tracks. This is only tracked - * since Hive 1.3.0/2.0 - thus may be 0. See ValidCompactorTxnList and uses for more info. + * are resolved (i.e. not opened). This is what "highestWriteId" tracks. This is only tracked + * since Hive 1.3.0/2.0 - thus may be 0. See ValidCompactorWriteIdList and uses for more info. 
 * - * We only want to clean up to the highestTxnId - otherwise we risk deleteing deltas from + * We only want to clean up to the highestWriteId - otherwise we risk deleting deltas from * under an active reader. * * Suppose we have deltas D2 D3 for table T, i.e. the last compaction created D3 so now there is a * @@ -255,10 +255,10 @@ private void clean(CompactionInfo ci) throws MetaException { * Between that check and removeFiles() a query starts (it will be reading D3) and another compaction * completes which creates D4. * Now removeFiles() (more specifically AcidUtils.getAcidState()) will declare D3 to be obsolete - * unless ValidTxnList is "capped" at highestTxnId. + * unless ValidTxnList is "capped" at highestWriteId. */ - final ValidTxnList txnList = ci.highestTxnId > 0 ? - new ValidReadTxnList(new long[0], new BitSet(), ci.highestTxnId) : new ValidReadTxnList(); + final ValidWriteIdList txnList = ci.highestWriteId > 0 ? + new ValidReaderWriteIdList(ci.getFullTableName(), new long[0], new BitSet(), ci.highestWriteId) : new ValidReaderWriteIdList(); if (runJobAsSelf(ci.runAs)) { removeFiles(location, txnList); @@ -288,8 +288,8 @@ public Object run() throws Exception { } } - private void removeFiles(String location, ValidTxnList txnList) throws IOException { - AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, txnList); + private void removeFiles(String location, ValidWriteIdList writeIdList) throws IOException { + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, writeIdList); List obsoleteDirs = dir.getObsolete(); List filesToDelete = new ArrayList(obsoleteDirs.size()); for (FileStatus stat : obsoleteDirs) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 0e456df..b0c62f6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -34,8 +34,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StringableMap; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -109,7 +109,7 @@ public CompactorMR() { } private JobConf createBaseJobConf(HiveConf conf, String jobName, Table t, StorageDescriptor sd, - ValidTxnList txns, CompactionInfo ci) { + ValidWriteIdList writeIds, CompactionInfo ci) { JobConf job = new JobConf(conf); job.setJobName(jobName); job.setOutputKeyClass(NullWritable.class); @@ -134,7 +134,7 @@ private JobConf createBaseJobConf(HiveConf conf, String jobName, Table t, Storag job.setBoolean(IS_COMPRESSED, sd.isCompressed()); job.set(TABLE_PROPS, new StringableMap(t.getParameters()).toString()); job.setInt(NUM_BUCKETS, sd.getNumBuckets()); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); overrideMRProps(job, t.getParameters()); // override MR properties from tblproperties if applicable if (ci.properties != null) { overrideTblProps(job, t.getParameters(), ci.properties); @@ -196,12 +196,12 @@ private void overrideMRProps(JobConf job, Map properties) { * @param jobName name to run this job with * @param t metastore table * @param sd metastore storage descriptor - * @param txns list of valid transactions + * @param writeIds list of valid write ids * @param ci CompactionInfo * @throws java.io.IOException if the job fails */ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, - ValidTxnList txns, CompactionInfo ci, 
Worker.StatsUpdater su, TxnStore txnHandler) throws IOException { + ValidWriteIdList writeIds, CompactionInfo ci, Worker.StatsUpdater su, TxnStore txnHandler) throws IOException { if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION)) { throw new RuntimeException(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION.name() + "=true"); @@ -212,18 +212,18 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, if (AcidUtils.isInsertOnlyTable(t.getParameters())) { LOG.debug("Going to delete directories for aborted transactions for MM table " + t.getDbName() + "." + t.getTableName()); - removeFiles(conf, sd.getLocation(), txns, t); + removeFiles(conf, sd.getLocation(), writeIds, t); return; } - JobConf job = createBaseJobConf(conf, jobName, t, sd, txns, ci); + JobConf job = createBaseJobConf(conf, jobName, t, sd, writeIds, ci); // Figure out and encode what files we need to read. We do this here (rather than in // getSplits below) because as part of this we discover our minimum and maximum transactions, // and discovering that in getSplits is too late as we then have no way to pass it to our // mapper. 
- AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns, false, true); + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds, false, true); List parsedDeltas = dir.getCurrentDirectories(); int maxDeltastoHandle = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA); if(parsedDeltas.size() > maxDeltastoHandle) { @@ -241,14 +241,14 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, "runaway/mis-configured process writing to ACID tables, especially using Streaming Ingest API."); int numMinorCompactions = parsedDeltas.size() / maxDeltastoHandle; for(int jobSubId = 0; jobSubId < numMinorCompactions; jobSubId++) { - JobConf jobMinorCompact = createBaseJobConf(conf, jobName + "_" + jobSubId, t, sd, txns, ci); + JobConf jobMinorCompact = createBaseJobConf(conf, jobName + "_" + jobSubId, t, sd, writeIds, ci); launchCompactionJob(jobMinorCompact, null, CompactionType.MINOR, null, parsedDeltas.subList(jobSubId * maxDeltastoHandle, (jobSubId + 1) * maxDeltastoHandle), maxDeltastoHandle, -1, conf, txnHandler, ci.id, jobName); } //now recompute state since we've done minor compactions and have different 'best' set of deltas - dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns); + dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds); } StringableList dirsToSearch = new StringableList(); @@ -279,8 +279,8 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, if (parsedDeltas.size() == 0 && dir.getOriginalFiles().size() == 0) { // Skip compaction if there's no delta files AND there's no original files String minOpenInfo = "."; - if(txns.getMinOpenTxn() != null) { - minOpenInfo = " with min Open " + JavaUtils.txnIdToString(txns.getMinOpenTxn()) + + if(writeIds.getMinOpenWriteId() != null) { + minOpenInfo = " with min Open " + JavaUtils.writeIdToString(writeIds.getMinOpenWriteId()) + ". 
Compaction cannot compact above this txnid"; } LOG.error("No delta files or original files found to compact in " + sd.getLocation() + @@ -315,8 +315,8 @@ private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compa LOG.debug("Adding delta " + delta.getPath() + " to directories to search"); dirsToSearch.add(delta.getPath()); deltaDirs.add(delta.getPath()); - minTxn = Math.min(minTxn, delta.getMinTransaction()); - maxTxn = Math.max(maxTxn, delta.getMaxTransaction()); + minTxn = Math.min(minTxn, delta.getMinWriteId()); + maxTxn = Math.max(maxTxn, delta.getMaxWriteId()); } if (baseDir != null) job.set(BASE_DIR, baseDir.toString()); @@ -378,9 +378,9 @@ private void setColumnTypes(JobConf job, List cols) { } // Remove the directories for aborted transactions only - private void removeFiles(HiveConf conf, String location, ValidTxnList txnList, Table t) + private void removeFiles(HiveConf conf, String location, ValidWriteIdList writeIdList, Table t) throws IOException { - AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, txnList, + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, writeIdList, Ref.from(false), false, t.getParameters()); // For MM table, we only want to delete delta dirs for aborted txns. 
List abortedDirs = dir.getAbortedDirectories(); @@ -717,13 +717,13 @@ public void map(WritableComparable key, CompactorInputSplit split, @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidInputFormat aif = instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME)); - ValidTxnList txnList = - new ValidCompactorTxnList(jobConf.get(ValidTxnList.VALID_TXNS_KEY)); + ValidWriteIdList writeIdList = + new ValidCompactorWriteIdList(jobConf.get(ValidWriteIdList.VALID_WRITEIDS_KEY)); boolean isMajor = jobConf.getBoolean(IS_MAJOR, false); AcidInputFormat.RawReader reader = aif.getRawReader(jobConf, isMajor, split.getBucket(), - txnList, split.getBaseDir(), split.getDeltaDirs()); + writeIdList, split.getBaseDir(), split.getDeltaDirs()); RecordIdentifier identifier = reader.createKey(); V value = reader.createValue(); getWriter(reporter, reader.getObjectInspector(), split.getBucket()); @@ -778,8 +778,8 @@ private void getWriter(Reporter reporter, ObjectInspector inspector, .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) - .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) + .maximumWriteId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format @@ -803,8 +803,8 @@ private void getDeleteEventWriter(Reporter reporter, ObjectInspector inspector, .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) - .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) + 
.maximumWriteId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format @@ -925,8 +925,8 @@ public void commitJob(JobContext context) throws IOException { AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .writingBase(conf.getBoolean(IS_MAJOR, false)) .isCompressed(conf.getBoolean(IS_COMPRESSED, false)) - .minimumTransactionId(conf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(conf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(conf.getLong(MIN_TXN, Long.MAX_VALUE)) + .maximumWriteId(conf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(0) .statementId(-1); Path newDeltaDir = AcidUtils.createFilename(finalLocation, options).getParent(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index a52e023..94975d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -21,11 +21,12 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; @@ -46,6 +47,7 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.Collections; import java.util.List; import 
java.util.Map; import java.util.Set; @@ -88,8 +90,6 @@ public void run() { startedAt = System.currentTimeMillis(); //todo: add method to only get current i.e. skip history - more efficient ShowCompactResponse currentCompactions = txnHandler.showCompact(new ShowCompactRequest()); - ValidTxnList txns = - TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); Set potentials = txnHandler.findPotentialCompactions(abortedThreshold); LOG.debug("Found " + potentials.size() + " potential compactions, " + "checking to see if we should compact any of them"); @@ -143,12 +143,20 @@ public void run() { ", assuming it has been dropped and moving on."); continue; } + + // Compaction doesn't work under a transaction and hence pass 0 for current txn Id + // The response will have one entry per table and hence we get only one OpenWriteIds + String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(Collections.singletonList(fullTableName), null); + ValidWriteIdList writeIds = + TxnUtils.createValidCompactWriteIdList(txnHandler.getOpenWriteIds(rqst).getOpenWriteIds().get(0)); + StorageDescriptor sd = resolveStorageDescriptor(t, p); String runAs = findUserToRunAs(sd.getLocation(), t); /*Future thought: checkForCompaction will check a lot of file metadata and may be expensive. 
* Long term we should consider having a thread pool here and running checkForCompactionS * in parallel*/ - CompactionType compactionNeeded = checkForCompaction(ci, txns, sd, t.getParameters(), runAs); + CompactionType compactionNeeded = checkForCompaction(ci, writeIds, sd, t.getParameters(), runAs); if (compactionNeeded != null) requestCompaction(ci, runAs, compactionNeeded); } catch (Throwable t) { LOG.error("Caught exception while trying to determine if we should compact " + @@ -215,7 +223,7 @@ private boolean lookForCurrentCompactions(ShowCompactResponse compactions, } private CompactionType checkForCompaction(final CompactionInfo ci, - final ValidTxnList txns, + final ValidWriteIdList writeIds, final StorageDescriptor sd, final Map tblproperties, final String runAs) @@ -227,7 +235,7 @@ private CompactionType checkForCompaction(final CompactionInfo ci, return CompactionType.MAJOR; } if (runJobAsSelf(runAs)) { - return determineCompactionType(ci, txns, sd, tblproperties); + return determineCompactionType(ci, writeIds, sd, tblproperties); } else { LOG.info("Going to initiate as user " + runAs); UserGroupInformation ugi = UserGroupInformation.createProxyUser(runAs, @@ -235,7 +243,7 @@ private CompactionType checkForCompaction(final CompactionInfo ci, CompactionType compactionType = ugi.doAs(new PrivilegedExceptionAction() { @Override public CompactionType run() throws Exception { - return determineCompactionType(ci, txns, sd, tblproperties); + return determineCompactionType(ci, writeIds, sd, tblproperties); } }); try { @@ -248,7 +256,7 @@ public CompactionType run() throws Exception { } } - private CompactionType determineCompactionType(CompactionInfo ci, ValidTxnList txns, + private CompactionType determineCompactionType(CompactionInfo ci, ValidWriteIdList writeIds, StorageDescriptor sd, Map tblproperties) throws IOException, InterruptedException { @@ -259,7 +267,7 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidTxnList t boolean noBase = 
false; Path location = new Path(sd.getLocation()); FileSystem fs = location.getFileSystem(conf); - AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns, false, false); + AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, writeIds, false, false); Path base = dir.getBaseDirectory(); long baseSize = 0; FileStatus stat = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 4508e59..8df7b1f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -18,16 +18,17 @@ package org.apache.hadoop.hive.ql.txn.compactor; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.mapred.JobConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.CommandNeedRetryException; @@ -138,10 +139,15 @@ public void run() { } final boolean isMajor = ci.isMajorCompaction(); - final ValidTxnList txns = - TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); - 
LOG.debug("ValidCompactTxnList: " + txns.writeToString()); - txnHandler.setCompactionHighestTxnId(ci, txns.getHighWatermark()); + + // Compaction doesn't work under a transaction and hence pass 0 for current txn Id + // The response will have one entry per table and hence we get only one OpenWriteIds + String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(Collections.singletonList(fullTableName), null); + final ValidWriteIdList writeIds = + TxnUtils.createValidCompactWriteIdList(txnHandler.getOpenWriteIds(rqst).getOpenWriteIds().get(0)); + LOG.debug("ValidCompactWriteIdList: " + writeIds.writeToString()); + txnHandler.setCompactionHighestWriteId(ci, writeIds.getHighWatermark()); final StringBuilder jobName = new StringBuilder(name); jobName.append("-compactor-"); jobName.append(ci.getFullPartitionName()); @@ -164,14 +170,14 @@ public void run() { launchedJob = true; try { if (runJobAsSelf(runAs)) { - mr.run(conf, jobName.toString(), t, sd, txns, ci, su, txnHandler); + mr.run(conf, jobName.toString(), t, sd, writeIds, ci, su, txnHandler); } else { UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(), UserGroupInformation.getLoginUser()); ugi.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { - mr.run(conf, jobName.toString(), t, sd, txns, ci, su, txnHandler); + mr.run(conf, jobName.toString(), t, sd, writeIds, ci, su, txnHandler); return null; } }); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 2a1545f..9e8fcca 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -766,8 +766,8 @@ public void testNonAcidToAcidConversion01() throws Exception { Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/000001_0")); 
Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t5")); Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/000001_0_copy_1")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":16,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/delta_0000016_0000016_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/delta_0000001_0000001_0000/bucket_00001")); //run Compaction runStatementOnDriver("alter table "+ TestTxnCommands2.Table.NONACIDORCTBL +" compact 'major'"); TestTxnCommands2.runWorker(hiveConf); @@ -778,13 +778,13 @@ public void testNonAcidToAcidConversion01() throws Exception { } Assert.assertEquals("", 4, rs.size()); Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t12")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nonacidorctbl/base_0000016/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nonacidorctbl/base_0000001/bucket_00000")); Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t1\t2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/base_0000016/bucket_00001")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/base_0000001/bucket_00001")); Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t5")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/base_0000016/bucket_00001")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":16,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/base_0000016/bucket_00001")); + 
Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/base_0000001/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/base_0000001/bucket_00001")); //make sure they are the same before and after compaction } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index 048215a..28fd19d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -34,7 +34,8 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -366,14 +367,14 @@ public void testNonAcidToAcidConversion02() throws Exception { */ String[][] expected = { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t13", "bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000"}, - {"{\"transactionid\":22,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "bucket_00001"}, 
{"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":3}\t1\t4", "bucket_00001"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":2}\t1\t5", "bucket_00001"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":4}\t1\t6", "bucket_00001"}, - {"{\"transactionid\":20,\"bucketid\":536936448,\"rowid\":0}\t1\t16", "bucket_00001"} + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t16", "bucket_00001"} }; Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size()); for(int i = 0; i < expected.length; i++) { @@ -758,11 +759,11 @@ public void testNonAcidToAcidConversion3() throws Exception { FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); if (numDelta == 1) { - Assert.assertEquals("delta_0000024_0000024_0000", status[i].getPath().getName()); + Assert.assertEquals("delta_0000001_0000001_0000", status[i].getPath().getName()); Assert.assertEquals(BUCKET_COUNT - 1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } else if (numDelta == 2) { - Assert.assertEquals("delta_0000025_0000025_0000", status[i].getPath().getName()); + Assert.assertEquals("delta_0000002_0000002_0000", status[i].getPath().getName()); Assert.assertEquals(1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -771,7 +772,7 @@ public void testNonAcidToAcidConversion3() throws Exception { FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); if (numDeleteDelta == 1) { - Assert.assertEquals("delete_delta_0000024_0000024_0000", status[i].getPath().getName()); + Assert.assertEquals("delete_delta_0000001_0000001_0000", status[i].getPath().getName()); Assert.assertEquals(BUCKET_COUNT - 1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -818,7 +819,7 @@ public void testNonAcidToAcidConversion3() throws Exception { 
Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } else if (numBase == 2) { // The new base dir now has two bucket files, since the delta dir has two bucket files - Assert.assertEquals("base_0000025", status[i].getPath().getName()); + Assert.assertEquals("base_0000002", status[i].getPath().getName()); Assert.assertEquals(1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -844,7 +845,7 @@ public void testNonAcidToAcidConversion3() throws Exception { status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.HIDDEN_FILES_PATH_FILTER); Assert.assertEquals(1, status.length); - Assert.assertEquals("base_0000025", status[0].getPath().getName()); + Assert.assertEquals("base_0000002", status[0].getPath().getName()); FileStatus[] buckets = fs.listStatus(status[0].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); Assert.assertEquals(1, buckets.length); @@ -860,7 +861,7 @@ public void testNonAcidToAcidConversion3() throws Exception { public void testValidTxnsBookkeeping() throws Exception { // 1. 
Run a query against a non-ACID table, and we shouldn't have txn logged in conf runStatementOnDriver("select * from " + Table.NONACIDORCTBL); - String value = hiveConf.get(ValidTxnList.VALID_TXNS_KEY); + String value = hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); Assert.assertNull("The entry should be null for query that doesn't involve ACID tables", value); } @@ -873,9 +874,9 @@ public void testSimpleRead() throws Exception { //this will cause next txn to be marked aborted but the data is still written to disk hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData2)); - assert hiveConf.get(ValidTxnList.VALID_TXNS_KEY) == null : "previous txn should've cleaned it"; + assert hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null : "previous txn should've cleaned it"; //so now if HIVEFETCHTASKCONVERSION were to use a stale value, it would use a - //ValidTxnList with HWM=MAX_LONG, i.e. include the data for aborted txn + //ValidWriteIdList with HWM=MAX_LONG, i.e. include the data for aborted txn List rs = runStatementOnDriver("select * from " + Table.ACIDTBL); Assert.assertEquals("Extra data", 2, rs.size()); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java index 3a3272f..1f39c20 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java @@ -105,13 +105,13 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][]{ - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000020_0000020_0000/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("update T set b = 17 where a = 1"); String[][] expected2 = new String[][]{ - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":23,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000023_0000023_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000002_0000002_0000/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "update"); @@ -121,15 +121,15 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { - {"{\"transactionid\":23,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000020_0000027/bucket_00000"}, - {"{\"transactionid\":26,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000020_0000027/bucket_00000"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000001_0000004/bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000001_0000004/bucket_00000"} }; checkResult(expected3, testQuery, isVectorized, 
"delete compact minor"); runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/1/data' overwrite into table T"); String[][] expected4 = new String[][]{ - {"{\"transactionid\":31,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000031/000000_0"}, - {"{\"transactionid\":31,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000031/000000_0"}}; + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000005/000000_0"}, + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000005/000000_0"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite"); //load same data again (additive) @@ -138,9 +138,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("delete from T where a = 3");//matches 2 rows runStatementOnDriver("insert into T values(2,2)"); String[][] expected5 = new String[][]{ - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000035_0000035_0000/bucket_00000"}, - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000035_0000035_0000/bucket_00000"}, - {"{\"transactionid\":37,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000037_0000037_0000/bucket_00000"} + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, + {"{\"transactionid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000009_0000009_0000/bucket_00000"} }; checkResult(expected5, testQuery, isVectorized, "load data inpath overwrite update"); @@ -148,9 +148,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected6 = new String[][]{ - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":0}\t1\t17", 
"t/base_0000037/bucket_00000"}, - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000037/bucket_00000"}, - {"{\"transactionid\":37,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000037/bucket_00000"} + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/base_0000009/bucket_00000"}, + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000009/bucket_00000"}, + {"{\"transactionid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000009/bucket_00000"} }; checkResult(expected6, testQuery, isVectorized, "load data inpath compact major"); } @@ -174,21 +174,21 @@ private void loadData(boolean isVectorized) throws Exception { "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { //normal insert - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000016_0000016_0000/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000016_0000016_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, //Load Data - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000021_0000021_0000/000000_0"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000021_0000021_0000/000000_0"}}; + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); //test minor compaction runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected1 = new String[][] { - 
{"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000016_0000021/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000002/bucket_00000"} }; checkResult(expected1, testQuery, isVectorized, "load data inpath (minor)"); @@ -197,11 +197,11 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":27,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000027/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000003/bucket_00000"}, + 
{"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000003/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath (major)"); @@ -210,8 +210,8 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("export table Tstage to '" + getWarehouseDir() +"/2'"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/2/data' overwrite into table T"); String[][] expected3 = new String[][] { - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000033/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000033/000000_0"}}; + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000004/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000004/000000_0"}}; checkResult(expected3, testQuery, isVectorized, "load data inpath overwrite"); //one more major compaction @@ -219,9 +219,9 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected4 = new String[][] { - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000036/bucket_00000"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000036/bucket_00000"}, - {"{\"transactionid\":36,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000036/bucket_00000"}}; + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000005/bucket_00000"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000005/bucket_00000"}, + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000005/bucket_00000"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite (major)"); } /** 
@@ -254,20 +254,20 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti /* {"transactionid":0,"bucketid":536870912,"rowid":0} 0 2/000000_0 {"transactionid":0,"bucketid":536870912,"rowid":1} 0 4/000000_0 -{"transactionid":24,"bucketid":536870912,"rowid":0} 4 4/delta_0000024_0000024_0000/000000_0 -{"transactionid":24,"bucketid":536870912,"rowid":1} 5 5/delta_0000024_0000024_0000/000000_0 +{"transactionid":1,"bucketid":536870912,"rowid":0} 4 4/delta_0000001_0000001_0000/000000_0 +{"transactionid":1,"bucketid":536870912,"rowid":1} 5 5/delta_0000001_0000001_0000/000000_0 */ String[][] expected = new String[][] { //from pre-acid insert {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/000000_0"}, //from Load Data into acid converted table - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":0}\t2\t2", "t/delta_0000024_0000024_0000/000001_0"}, - {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":1}\t3\t3", "t/delta_0000024_0000024_0000/000001_0"}, - {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":0}\t4\t4", "t/delta_0000024_0000024_0000/000002_0"}, - {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":1}\t5\t5", "t/delta_0000024_0000024_0000/000002_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t2\t2", "t/delta_0000001_0000001_0000/000001_0"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t3\t3", "t/delta_0000001_0000001_0000/000001_0"}, + 
{"{\"transactionid\":1,\"bucketid\":537001984,\"rowid\":0}\t4\t4", "t/delta_0000001_0000001_0000/000002_0"}, + {"{\"transactionid\":1,\"bucketid\":537001984,\"rowid\":1}\t5\t5", "t/delta_0000001_0000001_0000/000002_0"}, }; checkResult(expected, testQuery, isVectorized, "load data inpath"); @@ -279,9 +279,9 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/2/data' overwrite into table T"); String[][] expected2 = new String[][] { - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000030/000000_0"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000030/000000_0"}, - {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000030/000001_0"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000002/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000002/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000002/000001_0"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath overwrite"); @@ -291,10 +291,10 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000033/bucket_00000"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000033/bucket_00000"}, - {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000033/bucket_00001"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t9\t9", "t/base_0000033/bucket_00000"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000003/bucket_00000"}, + 
{"{\"transactionid\":2,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000003/bucket_00001"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t9\t9", "t/base_0000003/bucket_00000"} }; checkResult(expected3, testQuery, isVectorized, "load data inpath overwrite (major)"); @@ -326,12 +326,12 @@ public void loadDataPartitioned() throws Exception { List rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); String[][] expected = new String[][] { - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":28,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000028_0000028_0000/000000_0"}, - {"{\"transactionid\":28,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000028_0000028_0000/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000003_0000003_0000/000000_0"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000003_0000003_0000/000000_0"}}; checkExpected(rs, expected, "load data inpath partitioned"); @@ 
-340,10 +340,10 @@ public void loadDataPartitioned() throws Exception { runStatementOnDriver("truncate table Tstage"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/4/data' overwrite into table T partition(p=1)"); String[][] expected2 = new String[][] { - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000033/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000033/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000004/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000004/000000_0"}}; rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); checkExpected(rs, expected2, "load data inpath partitioned overwrite"); } @@ -405,20 +405,20 @@ private void testMultiStatement(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000019_0000019_0001/000000_0"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000019_0000019_0001/000000_0"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000001_0000001_0001/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000001_0000001_0001/000000_0"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000019/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000001/bucket_00000"}, + 
{"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000001/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath (major)"); //at lest for now, Load Data w/Overwrite is not allowed in a txn: HIVE-18154 @@ -444,8 +444,8 @@ public void testAbort() throws Exception { String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000019_0000019_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index 3c6b6be..b93e775 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -89,14 +89,14 @@ public void testNoBuckets() throws Exception { /**the insert creates 2 output files (presumably because there are 2 input files) * The number in the file name is writerId. 
This is the number encoded in ROW__ID.bucketId - * see {@link org.apache.hadoop.hive.ql.io.BucketCodec}*/ - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); runStatementOnDriver("update nobuckets set c3 = 
17 where c3 in(0,1)"); rs = runStatementOnDriver("select ROW__ID, c1, c2, c3, INPUT__FILE__NAME from nobuckets order by INPUT__FILE__NAME, ROW__ID"); @@ -104,22 +104,22 @@ public void testNoBuckets() throws Exception { for(String s : rs) { LOG.warn(s); } - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); //so update has 1 writer which creates bucket0 where both new rows land - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000021_0000021_0000/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000021_0000021_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(3), 
rs.get(3).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); Set expectedFiles = new HashSet<>(); //both delete events land in a single bucket0. Each has a different ROW__ID.bucketId value (even writerId in it is different) - expectedFiles.add("ts/delete_delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("nobuckets/delta_0000019_0000019_0000/bucket_00000"); - expectedFiles.add("nobuckets/delta_0000019_0000019_0000/bucket_00001"); - expectedFiles.add("nobuckets/delta_0000021_0000021_0000/bucket_00000"); + expectedFiles.add("ts/delete_delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00000"); + expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("nobuckets/delta_0000002_0000002_0000/bucket_00000"); //check that we get the right files on disk assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); //todo: it would be nice to check the contents of the files... 
could use orc.FileDump - it has @@ -133,33 +133,33 @@ public void testNoBuckets() throws Exception { LOG.warn(s); } /* -├── base_0000021 +├── base_0000002 │   ├── bucket_00000 │   └── bucket_00001 -├── delete_delta_0000021_0000021_0000 +├── delete_delta_0000002_0000002_0000 │   └── bucket_00000 -├── delta_0000019_0000019_0000 +├── delta_0000001_0000001_0000 │   ├── bucket_00000 │   └── bucket_00001 -└── delta_0000021_0000021_0000 +└── delta_0000002_0000002_0000 └── bucket_00000 */ - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/base_0000021/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); + Assert.assertTrue(rs.get(2), 
rs.get(2).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/base_0000002/bucket_00001")); expectedFiles.clear(); - expectedFiles.add("delete_delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("uckets/delta_0000019_0000019_0000/bucket_00000"); - expectedFiles.add("uckets/delta_0000019_0000019_0000/bucket_00001"); - expectedFiles.add("uckets/delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000021/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000021/bucket_00001"); + expectedFiles.add("delete_delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("uckets/delta_0000001_0000001_0000/bucket_00000"); + expectedFiles.add("uckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("uckets/delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000002/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000002/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); TestTxnCommands2.runCleaner(hiveConf); @@ -168,8 +168,8 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("Unexpected result after clean", stringifyValues(result), rs); expectedFiles.clear(); - expectedFiles.add("nobuckets/base_0000021/bucket_00000"); - expectedFiles.add("nobuckets/base_0000021/bucket_00001"); + expectedFiles.add("nobuckets/base_0000002/bucket_00000"); + expectedFiles.add("nobuckets/base_0000002/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); } @@ -185,8 +185,8 @@ public void testCTAS() throws Exception { "'='true', 'transactional_properties'='default') as select a, b from " + Table.NONACIDORCTBL); List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas order by 
ROW__ID"); String expected[][] = { - {"{\"transactionid\":14,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000014_0000014_0000/bucket_00000"}, - {"{\"transactionid\":14,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000014_0000014_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected, "Unexpected row count after ctas from non acid table"); @@ -195,8 +195,8 @@ public void testCTAS() throws Exception { "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem makeing table acid in listener is too late rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID"); String expected2[][] = { - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000017_0000017_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected2, "Unexpected row count after ctas from acid table"); @@ -205,10 +205,10 @@ public void testCTAS() throws Exception { " union all select a, b from " + Table.ACIDTBL); rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas3 order by ROW__ID"); String expected3[][] = { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00000"}, - 
{"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00001"}, - {"{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"}, }; checkExpected(rs, expected3, "Unexpected row count after ctas from union all query"); @@ -217,8 +217,8 @@ public void testCTAS() throws Exception { " union distinct select a, b from " + Table.ACIDTBL); rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas4 order by ROW__ID"); String expected4[][] = { - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000021_0000021_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected4, "Unexpected row count after ctas from union distinct query"); } @@ -268,11 +268,11 @@ public void testInsertToAcidWithUnionRemove() throws Exception { List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"); String 
expected[][] = { - {"{\"transactionid\":16,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000016_0000016_0001/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000016_0000016_0001/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000016_0000016_0002/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000016_0000016_0002/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000016_0000016_0003/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0003/bucket_00000"}, }; checkExpected(rs, expected, "Unexpected row count after ctas"); } @@ -376,7 +376,7 @@ logical bucket (tranche) {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000024_0000024_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, }; rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by a, b, INPUT__FILE__NAME"); checkExpected(rs, 
expected3,"after converting to acid (no compaction with updates)"); @@ -388,15 +388,15 @@ logical bucket (tranche) /*Compaction preserves location of rows wrt buckets/tranches (for now)*/ String expected4[][] = { - {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000026/bucket_00002"}, - {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000026/bucket_00002"}, - {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/base_0000026/bucket_00001"}, - {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t9\t10", "warehouse/t/base_0000026/bucket_00001"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t10\t20", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000026/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000002/bucket_00002"}, + {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000002/bucket_00002"}, + {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/base_0000002/bucket_00001"}, + {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t9\t10", "warehouse/t/base_0000002/bucket_00001"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t10\t20", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", 
"warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000002/bucket_00000"}, }; checkExpected(rs, expected4,"after major compact"); } @@ -414,14 +414,14 @@ public void testInsertFromUnion() throws Exception { } /* The number of writers seems to be based on number of MR jobs for the src query. todo check number of FileSinks - warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000000_0/delta_0000016_0000016_0000/bucket_00000 [length: 648] - {"operation":0,"originalTransaction":16,"bucket":536870912,"rowId":0,"currentTransaction":16,"row":{"_col0":1,"_col1":2}} - {"operation":0,"originalTransaction":16,"bucket":536870912,"rowId":1,"currentTransaction":16,"row":{"_col0":2,"_col1":4}} + warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000000_0/delta_0000001_0000001_0000/bucket_00000 [length: 648] + {"operation":0,"originalTransaction":1,"bucket":536870912,"rowId":0,"currentTransaction":1,"row":{"_col0":1,"_col1":2}} + {"operation":0,"originalTransaction":1,"bucket":536870912,"rowId":1,"currentTransaction":1,"row":{"_col0":2,"_col1":4}} ________________________________________________________________________________________________________________________ - warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000001_0/delta_0000016_0000016_0000/bucket_00001 [length: 658] - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":0,"currentTransaction":16,"row":{"_col0":5,"_col1":6}} - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":1,"currentTransaction":16,"row":{"_col0":6,"_col1":8}} - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":2,"currentTransaction":16,"row":{"_col0":9,"_col1":10}} + 
warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000001_0/delta_0000001_0000001_0000/bucket_00001 [length: 658] + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":0,"currentTransaction":1,"row":{"_col0":5,"_col1":6}} + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":1,"currentTransaction":1,"row":{"_col0":6,"_col1":8}} + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":2,"currentTransaction":1,"row":{"_col0":9,"_col1":10}} */ rs = runStatementOnDriver("select a, b from T order by a, b"); Assert.assertEquals(stringifyValues(values), rs); @@ -469,14 +469,14 @@ public void testToAcidConversion02() throws Exception { */ String[][] expected = { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t0\t13", "bucket_00000", "000000_0_copy_1"}, - {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "bucket_00000", "000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t1\t4", "bucket_00000", "000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t1\t5", "bucket_00000", "000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":6}\t1\t6", "bucket_00000", "000000_0_copy_2"}, - {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"} + 
{"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"} }; Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size()); for(int i = 0; i < expected.length; i++) { @@ -491,17 +491,17 @@ public void testToAcidConversion02() throws Exception { ├── 000000_0 ├── 000000_0_copy_1 ├── 000000_0_copy_2 - ├── base_0000021 + ├── base_0000004 │   └── bucket_00000 - ├── delete_delta_0000019_0000019_0000 + ├── delete_delta_0000002_0000002_0000 │   └── bucket_00000 - ├── delete_delta_0000021_0000021_0000 + ├── delete_delta_0000004_0000004_0000 │   └── bucket_00000 - ├── delta_0000018_0000018_0000 + ├── delta_0000001_0000001_0000 │   └── bucket_00000 - ├── delta_0000019_0000019_0000 + ├── delta_0000002_0000002_0000 │   └── bucket_00000 - └── delta_0000020_0000020_0000 + └── delta_0000003_0000003_0000 └── bucket_00000 6 directories, 9 files @@ -606,7 +606,7 @@ public void testNonAcidToAcidVectorzied() throws Exception { query = "select ROW__ID, b from T where b > 0 order by a"; rs = runStatementOnDriver(query); String[][] expected4 = { - {"{\"transactionid\":25,\"bucketid\":536870912,\"rowid\":0}","17"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}","17"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}","4"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}","6"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}","8"}, @@ -627,11 +627,11 @@ public void testNonAcidToAcidVectorzied() throws Exception { query = "select ROW__ID, a, b, INPUT__FILE__NAME from T where b > 0 order by a, b"; rs = runStatementOnDriver(query); String[][] expected5 = {//the row__ids are the same after compaction - {"{\"transactionid\":25,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4", "warehouse/t/base_0000025/bucket_00000"}, - 
{"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10", "warehouse/t/base_0000025/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10", "warehouse/t/base_0000001/bucket_00000"} }; checkExpected(rs, expected5, "After major compaction"); //vectorized because there is INPUT__FILE__NAME @@ -671,14 +671,14 @@ public void testCompactStatsGather() throws Exception { String query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; List rs = runStatementOnDriver(query); String[][] expected = { - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", 
"t/p=1/q=2/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000017_0000017_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); @@ -689,14 +689,14 @@ public void testCompactStatsGather() throws Exception { query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; rs = runStatementOnDriver(query); String[][] expected2 = { - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", 
"t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000017/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000002/bucket_00000"} }; checkExpected(rs, expected2, "after major compaction"); @@ -721,8 +721,8 @@ public void testDefault() throws Exception { List rs = runStatementOnDriver(query); String[][] expected = { //this proves data is written in Acid layout so T was made Acid - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000015_0000015_0000/bucket_00000"}, - 
{"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000015_0000015_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index 2a520f4..461d142 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -292,8 +292,8 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, } desc.setWriteType(writeType); desc.setGatherStats(true); - if (txnId > 0) desc.setTransactionId(txnId); - if (writeType != AcidUtils.Operation.NOT_ACID) desc.setTransactionId(1L); + if (txnId > 0) desc.setTableWriteId(txnId); + if (writeType != AcidUtils.Operation.NOT_ACID) desc.setTableWriteId(1L); FileSinkOperator op = (FileSinkOperator)OperatorFactory.get( new CompilationOpContext(), FileSinkDesc.class); @@ -699,7 +699,7 @@ public float getProgress() throws IOException { public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory) throws IOException { @@ -725,18 +725,18 @@ public RecordUpdater getRecordUpdater(final 
Path path, final Options options) th return new RecordUpdater() { @Override - public void insert(long currentTransaction, Object row) throws IOException { + public void insert(long currentWriteId, Object row) throws IOException { addRow(row); numRecordsAdded++; } @Override - public void update(long currentTransaction, Object row) throws IOException { + public void update(long currentWriteId, Object row) throws IOException { addRow(row); } @Override - public void delete(long currentTransaction, Object row) throws IOException { + public void delete(long currentWriteId, Object row) throws IOException { addRow(row); numRecordsAdded--; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java index 23dadd0..a1eb39d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java @@ -49,8 +49,8 @@ public void testDeltaMetaDataReadFieldsNoStatementIds() throws Exception { deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(1)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().isEmpty(), is(true)); } @@ -63,8 +63,8 @@ public void testDeltaMetaDataReadFieldsWithStatementIds() throws Exception { deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(3)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().size(), is(2)); assertThat(deltaMetaData.getStmtIds().get(0), is(100)); assertThat(deltaMetaData.getStmtIds().get(1), is(101)); @@ -74,8 +74,8 @@ public void 
testDeltaMetaDataReadFieldsWithStatementIds() throws Exception { public void testDeltaMetaConstructWithState() throws Exception { DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData(2000L, 2001L, Arrays.asList(97, 98, 99)); - assertThat(deltaMetaData.getMinTxnId(), is(2000L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2001L)); + assertThat(deltaMetaData.getMinWriteId(), is(2000L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2001L)); assertThat(deltaMetaData.getStmtIds().size(), is(3)); assertThat(deltaMetaData.getStmtIds().get(0), is(97)); assertThat(deltaMetaData.getStmtIds().get(1), is(98)); @@ -95,8 +95,8 @@ public void testDeltaMetaDataReadFieldsWithStatementIdsResetsState() throws Exce deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(3)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().size(), is(2)); assertThat(deltaMetaData.getStmtIds().get(0), is(100)); assertThat(deltaMetaData.getStmtIds().get(1), is(101)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java index 8945fdf..437980b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java @@ -27,13 +27,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; -import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import 
org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockPath; @@ -56,8 +55,8 @@ public void testCreateFilename() throws Exception { assertEquals("/tmp/000123_0", AcidUtils.createFilename(p, options).toString()); options.bucket(23) - .minimumTransactionId(100) - .maximumTransactionId(200) + .minimumWriteId(100) + .maximumWriteId(200) .writingBase(true) .setOldStyle(false); assertEquals("/tmp/base_0000200/bucket_00023", @@ -92,8 +91,8 @@ public void testCreateFilenameLargeIds() throws Exception { assertEquals("/tmp/123456789_0", AcidUtils.createFilename(p, options).toString()); options.bucket(23) - .minimumTransactionId(1234567880) - .maximumTransactionId(1234567890) + .minimumWriteId(1234567880) + .maximumWriteId(1234567890) .writingBase(true) .setOldStyle(false); assertEquals("/tmp/base_1234567890/bucket_00023", @@ -118,29 +117,29 @@ public void testParsing() throws Exception { conf); assertEquals(false, opts.getOldStyle()); assertEquals(true, opts.isWritingBase()); - assertEquals(567, opts.getMaximumTransactionId()); - assertEquals(0, opts.getMinimumTransactionId()); + assertEquals(567, opts.getMaximumWriteId()); + assertEquals(0, opts.getMinimumWriteId()); assertEquals(123, opts.getBucketId()); opts = AcidUtils.parseBaseOrDeltaBucketFilename( new MockPath(fs, dir + "/delta_000005_000006/bucket_00001"), conf); assertEquals(false, opts.getOldStyle()); assertEquals(false, opts.isWritingBase()); - assertEquals(6, opts.getMaximumTransactionId()); - assertEquals(5, opts.getMinimumTransactionId()); + assertEquals(6, opts.getMaximumWriteId()); + assertEquals(5, opts.getMinimumWriteId()); assertEquals(1, opts.getBucketId()); opts = 
AcidUtils.parseBaseOrDeltaBucketFilename( new MockPath(fs, dir + "/delete_delta_000005_000006/bucket_00001"), conf); assertEquals(false, opts.getOldStyle()); assertEquals(false, opts.isWritingBase()); - assertEquals(6, opts.getMaximumTransactionId()); - assertEquals(5, opts.getMinimumTransactionId()); + assertEquals(6, opts.getMaximumWriteId()); + assertEquals(5, opts.getMinimumWriteId()); assertEquals(1, opts.getBucketId()); opts = AcidUtils.parseBaseOrDeltaBucketFilename(new Path(dir, "000123_0"), conf); assertEquals(true, opts.getOldStyle()); assertEquals(true, opts.isWritingBase()); assertEquals(123, opts.getBucketId()); - assertEquals(0, opts.getMinimumTransactionId()); - assertEquals(0, opts.getMaximumTransactionId()); + assertEquals(0, opts.getMinimumWriteId()); + assertEquals(0, opts.getMaximumWriteId()); } @@ -160,7 +159,7 @@ public void testOriginal() throws Exception { new MockFile("mock:/tbl/part1/subdir/000000_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, "/tbl/part1"), conf, - new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); assertEquals(0, dir.getCurrentDirectories().size()); assertEquals(0, dir.getObsolete().size()); @@ -195,7 +194,7 @@ public void testOriginalDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_101_101/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); List obsolete = dir.getObsolete(); assertEquals(2, obsolete.size()); @@ -215,12 +214,12 @@ public void testOriginalDeltas() throws Exception { assertEquals(2, deltas.size()); AcidUtils.ParsedDelta delt = deltas.get(0); 
assertEquals("mock:/tbl/part1/delta_025_030", delt.getPath().toString()); - assertEquals(25, delt.getMinTransaction()); - assertEquals(30, delt.getMaxTransaction()); + assertEquals(25, delt.getMinWriteId()); + assertEquals(30, delt.getMaxWriteId()); delt = deltas.get(1); assertEquals("mock:/tbl/part1/delta_050_100", delt.getPath().toString()); - assertEquals(50, delt.getMinTransaction()); - assertEquals(100, delt.getMaxTransaction()); + assertEquals(50, delt.getMinWriteId()); + assertEquals(100, delt.getMaxWriteId()); } @Test @@ -237,7 +236,7 @@ public void testBaseDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_90_120/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(5, obsolete.size()); @@ -251,8 +250,8 @@ public void testBaseDeltas() throws Exception { assertEquals(1, deltas.size()); AcidUtils.ParsedDelta delt = deltas.get(0); assertEquals("mock:/tbl/part1/delta_050_105", delt.getPath().toString()); - assertEquals(50, delt.getMinTransaction()); - assertEquals(105, delt.getMaxTransaction()); + assertEquals(50, delt.getMinWriteId()); + assertEquals(105, delt.getMaxWriteId()); } @Test @@ -265,7 +264,7 @@ public void testObsoleteOriginals() throws Exception { new MockFile("mock:/tbl/part1/000001_1", 500, new byte[0])); Path part = new MockPath(fs, "/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("150:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:150:" + Long.MAX_VALUE + ":")); // Obsolete list should include the two original bucket files, and the old base dir List obsolete = dir.getObsolete(); assertEquals(3, 
obsolete.size()); @@ -286,7 +285,7 @@ public void testOverlapingDelta() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(2, obsolete.size()); @@ -321,7 +320,7 @@ public void testOverlapingDelta2() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(5, obsolete.size()); @@ -346,7 +345,7 @@ public void deltasWithOpenTxnInRead() throws Exception { new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]), new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -367,7 +366,7 @@ public void deltasWithOpenTxnInRead2() throws Exception { new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]), new MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0])); Path part = new 
MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -382,7 +381,7 @@ public void deltasWithOpenTxnsNotInCompact() throws Exception { new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("4:" + Long.MAX_VALUE)); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); assertEquals(1, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -399,7 +398,7 @@ public void deltasWithOpenTxnsNotInCompact2() throws Exception { new MockFile("mock:/tbl/part1/delta_6_10/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("3:" + Long.MAX_VALUE)); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:3:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); assertEquals(1, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -424,7 +423,7 @@ public void testBaseWithDeleteDeltas() throws Exception { new MockFile("mock:/tbl/part1/delete_delta_110_110/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List 
obsolete = dir.getObsolete(); assertEquals(7, obsolete.size()); @@ -461,7 +460,7 @@ public void testOverlapingDeltaAndDeleteDelta() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(3, obsolete.size()); @@ -490,7 +489,7 @@ public void testMinorCompactedDeltaMakesInBetweenDelteDeltaObsolete() throws Exc new MockFile("mock:/tbl/part1/delete_delta_50_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); List obsolete = dir.getObsolete(); assertEquals(1, obsolete.size()); assertEquals("mock:/tbl/part1/delete_delta_50_50", obsolete.get(0).getPath().toString()); @@ -517,7 +516,7 @@ public void deltasAndDeleteDeltasWithOpenTxnsNotInCompact() throws Exception { new MockFile("mock:/tbl/part1/delta_6_10/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("4:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE + ":")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -538,7 +537,7 @@ public void deleteDeltasWithOpenTxnInRead() throws Exception { new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]), new 
MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(3, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 92f005d..8c7e79b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -48,7 +48,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -2267,7 +2267,7 @@ public void testVectorizationWithAcid() throws Exception { // write the orc file to the mock file system Path partDir = new Path(conf.get("mapred.input.dir")); OrcRecordUpdater writer = new OrcRecordUpdater(partDir, - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir)); for (int i = 0; i < 100; ++i) { BigRow row = new BigRow(i); @@ -2424,7 +2424,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { // write a base file in partition 0 OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0], - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new 
AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir[0])); for(int i=0; i < 10; ++i) { writer.insert(10, new MyRow(i, 2 * i)); @@ -2437,7 +2437,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { // write a delta file in partition 0 writer = new OrcRecordUpdater(partDir[0], - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(1).inspector(inspector).finalDestination(partDir[0])); for(int i=10; i < 20; ++i) { writer.insert(10, new MyRow(i, 2*i)); @@ -3558,12 +3558,12 @@ public void testACIDReaderNoFooterSerializeWithDeltas() throws Exception { } writer.close(); - AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumTransactionId(1) - .maximumTransactionId(1).inspector(inspector).finalDestination(mockPath); + AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumWriteId(1) + .maximumWriteId(1).inspector(inspector).finalDestination(mockPath); OrcOutputFormat of = new OrcOutputFormat(); RecordUpdater ru = of.getRecordUpdater(mockPath, options); for (int i = 0; i < 10; ++i) { - ru.insert(options.getMinimumTransactionId(), new MyRow(i, 2 * i)); + ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i)); } ru.close(false);//this deletes the side file @@ -3637,12 +3637,12 @@ public void testACIDReaderFooterSerializeWithDeltas() throws Exception { } writer.close(); - AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumTransactionId(1) - .maximumTransactionId(1).inspector(inspector).finalDestination(mockPath); + AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumWriteId(1) + .maximumWriteId(1).inspector(inspector).finalDestination(mockPath); OrcOutputFormat of = new OrcOutputFormat(); RecordUpdater ru = of.getRecordUpdater(mockPath, options); for (int i = 
0; i < 10; ++i) { - ru.insert(options.getMinimumTransactionId(), new MyRow(i, 2 * i)); + ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i)); } ru.close(false);//this deletes the side file @@ -3894,7 +3894,7 @@ public void testColumnProjectionWithAcid() throws Exception { long fileLength = fs.getFileStatus(testFilePath).getLen(); // test with same schema with include - conf.set(ValidTxnList.VALID_TXNS_KEY, "100:99:"); + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:100:99:"); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "a,b,d"); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "int,struct,string"); conf.set(ColumnProjectionUtils.READ_ALL_COLUMNS, "false"); @@ -3911,7 +3911,7 @@ public void testColumnProjectionWithAcid() throws Exception { while (reader.next(id, struct)) { assertEquals("id " + record, record, id.getRowId()); assertEquals("bucket " + record, 0, id.getBucketProperty()); - assertEquals("trans " + record, 1, id.getTransactionId()); + assertEquals("writeid " + record, 1, id.getWriteId()); assertEquals("a " + record, 42 * record, ((IntWritable) struct.getFieldValue(0)).get()); assertEquals(null, struct.getFieldValue(1)); @@ -3938,7 +3938,7 @@ public void testColumnProjectionWithAcid() throws Exception { while (reader.next(id, struct)) { assertEquals("id " + record, record, id.getRowId()); assertEquals("bucket " + record, 0, id.getBucketProperty()); - assertEquals("trans " + record, 1, id.getTransactionId()); + assertEquals("writeid " + record, 1, id.getWriteId()); assertEquals("a " + record, 42 * record, ((IntWritable) struct.getFieldValue(0)).get()); assertEquals(null, struct.getFieldValue(1)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java index c6a866a..3f04ee0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java +++ 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java @@ -35,8 +35,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -121,11 +121,11 @@ private static void setRow(OrcStruct event, long currentTransaction, String value) { event.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation)); - event.setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(originalTransaction)); event.setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(bucket)); event.setFieldValue(OrcRecordUpdater.ROW_ID, new LongWritable(rowId)); - event.setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(currentTransaction)); OrcStruct row = new OrcStruct(1); row.setFieldValue(0, new Text(value)); @@ -195,17 +195,17 @@ public void testReaderPair() throws Exception { ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, minKey, maxKey, new Reader.Options(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(40, key.getRowId()); - assertEquals(120, key.getCurrentTransactionId()); + assertEquals(120, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); + assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); 
assertEquals(60, key.getRowId()); - assertEquals(130, key.getCurrentTransactionId()); + assertEquals(130, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -221,38 +221,38 @@ public void testReaderPairNoMin() throws Exception { ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, null, null, new Reader.Options(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(20, key.getRowId()); - assertEquals(100, key.getCurrentTransactionId()); + assertEquals(100, key.getCurrentWriteId()); assertEquals("first", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(30, key.getRowId()); - assertEquals(110, key.getCurrentTransactionId()); + assertEquals(110, key.getCurrentWriteId()); assertEquals("second", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(40, key.getRowId()); - assertEquals(120, key.getCurrentTransactionId()); + assertEquals(120, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); + assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); assertEquals(60, key.getRowId()); - assertEquals(130, key.getCurrentTransactionId()); + assertEquals(130, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); + assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); assertEquals(61, key.getRowId()); - assertEquals(140, 
key.getCurrentTransactionId()); + assertEquals(140, key.getCurrentWriteId()); assertEquals("fifth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -303,19 +303,19 @@ public void testOriginalReaderPair() throws Exception { fs.makeQualified(root); fs.create(root); ReaderPair pair = new OrcRawRecordMerger.OriginalReaderPairToRead(key, reader, BUCKET, minKey, maxKey, - new Reader.Options().include(includes), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReadTxnList(), 0); + new Reader.Options().include(includes), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReaderWriteIdList(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(2, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(3, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -323,8 +323,8 @@ public void testOriginalReaderPair() throws Exception { Mockito.verify(recordReader).close(); } - private static ValidTxnList createMaximalTxnList() { - return new ValidReadTxnList(); + private static ValidWriteIdList createMaximalTxnList() { + return new ValidReaderWriteIdList(); } @Test @@ -339,40 +339,40 @@ public void testOriginalReaderPairNoMin() throws Exception { fs.makeQualified(root); fs.create(root); ReaderPair pair = new OrcRawRecordMerger.OriginalReaderPairToRead(key, reader, BUCKET, null, null, - new Reader.Options(), new OrcRawRecordMerger.Options().rootPath(root), conf, new 
ValidReadTxnList(), 0); + new Reader.Options(), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReaderWriteIdList(), 0); assertEquals("first", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(0, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("second", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(1, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("third", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(2, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("fourth", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(3, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("fifth", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(4, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals(null, pair.nextRecord()); @@ -452,13 +452,13 @@ public void testNewBase() throws Exception { OrcStruct event = merger.createValue(); 
assertEquals(true, merger.next(id, event)); - assertEquals(10, id.getTransactionId()); + assertEquals(10, id.getWriteId()); assertEquals(20, id.getBucketProperty()); assertEquals(40, id.getRowId()); assertEquals("third", getValue(event)); assertEquals(true, merger.next(id, event)); - assertEquals(40, id.getTransactionId()); + assertEquals(40, id.getWriteId()); assertEquals(50, id.getBucketProperty()); assertEquals(60, id.getRowId()); assertEquals("fourth", getValue(event)); @@ -477,9 +477,9 @@ public void testNewBase() throws Exception { assertEquals("operation", fields.get(OrcRecordUpdater.OPERATION).getFieldName()); assertEquals("currentTransaction", - fields.get(OrcRecordUpdater.CURRENT_TRANSACTION).getFieldName()); + fields.get(OrcRecordUpdater.CURRENT_WRITEID).getFieldName()); assertEquals("originalTransaction", - fields.get(OrcRecordUpdater.ORIGINAL_TRANSACTION).getFieldName()); + fields.get(OrcRecordUpdater.ORIGINAL_WRITEID).getFieldName()); assertEquals("bucket", fields.get(OrcRecordUpdater.BUCKET).getFieldName()); assertEquals("rowId", @@ -538,15 +538,15 @@ public void testGetLogicalLength() throws Exception { } /*create delta_1_1_0/bucket0 with 1 row and close the file*/ AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .inspector(inspector).bucket(BUCKET).writingBase(false).minimumTransactionId(1) - .maximumTransactionId(1).finalDestination(root); + .inspector(inspector).bucket(BUCKET).writingBase(false).minimumWriteId(1) + .maximumWriteId(1).finalDestination(root); Path delta1_1_0 = new Path(root, AcidUtils.deltaSubdir( - options.getMinimumTransactionId(), options.getMaximumTransactionId(), options.getStatementId())); + options.getMinimumWriteId(), options.getMaximumWriteId(), options.getStatementId())); Path bucket0 = AcidUtils.createBucketFile(delta1_1_0, BUCKET); Path bucket0SideFile = OrcAcidUtils.getSideFile(bucket0); RecordUpdater ru = of.getRecordUpdater(root, options); - ru.insert(options.getMaximumTransactionId(), new 
MyRow("first")); + ru.insert(options.getMaximumWriteId(), new MyRow("first")); ru.close(false); FileStatus bucket0File = fs.getFileStatus(bucket0); @@ -581,7 +581,7 @@ public void testEmpty() throws Exception { // write the empty base AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .inspector(inspector).bucket(BUCKET).writingBase(true) - .maximumTransactionId(100).finalDestination(root); + .maximumWriteId(100).finalDestination(root); of.getRecordUpdater(root, options).close(false); { /*OrcRecordUpdater is inconsistent about when it creates empty files and when it does not. @@ -593,8 +593,8 @@ public void testEmpty() throws Exception { AcidUtils.baseDir(100)), BUCKET), wo); w.close(); } - ValidTxnList txnList = new ValidReadTxnList("200:" + Long.MAX_VALUE); - AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testEmpty:200:" + Long.MAX_VALUE); + AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, writeIdList); Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(), BUCKET); @@ -646,7 +646,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { options.statementId(-1); } RecordUpdater ru = of.getRecordUpdater(root, - options.writingBase(true).maximumTransactionId(100)); + options.writingBase(true).maximumWriteId(100)); for(String v: values) { ru.insert(0, new MyRow(v)); } @@ -654,7 +654,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { // write a delta ru = of.getRecordUpdater(root, options.writingBase(false) - .minimumTransactionId(200).maximumTransactionId(200).recordIdColumn(1)); + .minimumWriteId(200).maximumWriteId(200).recordIdColumn(1)); ru.update(200, new MyRow("update 1", 0, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 2", 2, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 3", 3, 0, BUCKET_PROPERTY)); @@ -662,8 +662,8 @@ private void 
testNewBaseAndDelta(boolean use130Format) throws Exception { ru.delete(200, new MyRow("", 8, 0, BUCKET_PROPERTY)); ru.close(false); - ValidTxnList txnList = new ValidReadTxnList("200:" + Long.MAX_VALUE); - AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testNewBaseAndDelta:200:" + Long.MAX_VALUE); + AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, writeIdList); assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory()); assertEquals(new Path(root, use130Format ? @@ -978,13 +978,13 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { merger.close(); // try ignoring the 200 transaction and make sure it works still - ValidTxnList txns = new ValidReadTxnList("2000:200:200"); + ValidWriteIdList writeIds = new ValidReaderWriteIdList("testNewBaseAndDelta:2000:200:200"); //again 1st split is for base/ baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf)); merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, - txns, new Reader.Options(), + writeIds, new Reader.Options(), new Path[] {deleteDeltaDir}, new OrcRawRecordMerger.Options().isCompacting(false)); assertEquals(null, merger.getMinKey()); @@ -1006,7 +1006,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { OrcFile.readerOptions(conf)); merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, - txns, new Reader.Options(), + writeIds, new Reader.Options(), new Path[] {deleteDeltaDir}, new OrcRawRecordMerger.Options().isCompacting(false)); assertEquals(null, merger.getMinKey()); @@ -1101,7 +1101,7 @@ public synchronized void addedRow(int rows) throws IOException { // write a delta AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + .writingBase(false).minimumWriteId(1).maximumWriteId(1) 
.bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5) .finalDestination(root); @@ -1119,7 +1119,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false);//this doesn't create a key index presumably because writerOptions are not set on 'options' // write a delta - options = options.minimumTransactionId(100).maximumTransactionId(100); + options = options.minimumWriteId(100).maximumWriteId(100); ru = of.getRecordUpdater(root, options); values = new String[]{null, null, "1.0", null, null, null, null, "3.1"}; for(int i=0; i < values.length - 1; ++i) { @@ -1221,7 +1221,7 @@ public synchronized void addedRow(int rows) throws IOException { // make 5 stripes with 2 rows each OrcRecordUpdater.OrcOptions options = (OrcRecordUpdater.OrcOptions) new OrcRecordUpdater.OrcOptions(conf) - .writingBase(true).minimumTransactionId(0).maximumTransactionId(0) + .writingBase(true).minimumWriteId(0).maximumWriteId(0) .bucket(BUCKET).inspector(inspector).filesystem(fs); final int BUCKET_PROPERTY = BucketCodec.V1.encode(options); @@ -1239,7 +1239,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false); // write a delta - options.writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + options.writingBase(false).minimumWriteId(1).maximumWriteId(1) .recordIdColumn(5); ru = of.getRecordUpdater(root, options); values = new String[]{"0.0", null, null, "1.1", null, null, null, @@ -1253,7 +1253,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false); // write a delta - options.minimumTransactionId(100).maximumTransactionId(100); + options.minimumWriteId(100).maximumWriteId(100); ru = of.getRecordUpdater(root, options); values = new String[]{null, null, "1.0", null, null, null, null, "3.1"}; for(int i=0; i < values.length - 1; ++i) { @@ -1356,7 +1356,7 @@ public void testRecordReaderDelta() throws Exception { AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) 
.bucket(BUCKET).inspector(inspector).filesystem(fs) - .writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + .writingBase(false).minimumWriteId(1).maximumWriteId(1) .finalDestination(root); RecordUpdater ru = of.getRecordUpdater(root, options); String[][] values = {new String[]{"a", "b", "c", "d", "e"}, new String[]{"f", "g", "h", "i", "j"}}; @@ -1366,7 +1366,7 @@ public void testRecordReaderDelta() throws Exception { ru.close(false); // write a delta - options.minimumTransactionId(2).maximumTransactionId(2); + options.minimumWriteId(2).maximumWriteId(2); ru = of.getRecordUpdater(root, options); for(int i=0; i < values[1].length; ++i) { ru.insert(2, new MyRow(values[1][i])); @@ -1429,7 +1429,7 @@ private void testRecordReaderIncompleteDelta(boolean use130Format) throws Except // write a base AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .writingBase(true).minimumTransactionId(0).maximumTransactionId(0) + .writingBase(true).minimumWriteId(0).maximumWriteId(0) .bucket(BUCKET).inspector(inspector).filesystem(fs).finalDestination(root); if(!use130Format) { options.statementId(-1); @@ -1442,8 +1442,8 @@ private void testRecordReaderIncompleteDelta(boolean use130Format) throws Except ru.close(false); // write a delta - options.writingBase(false).minimumTransactionId(10) - .maximumTransactionId(19); + options.writingBase(false).minimumWriteId(10) + .maximumWriteId(19); ru = of.getRecordUpdater(root, options); values = new String[]{"6", "7", "8"}; for(int i=0; i < values.length; ++i) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java index 7914f0c..ef6dbbb 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java @@ -53,9 +53,9 @@ public void testAccessors() throws Exception { OrcStruct event = new 
OrcStruct(OrcRecordUpdater.FIELDS); event.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(OrcRecordUpdater.INSERT_OPERATION)); - event.setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(100)); - event.setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(50)); event.setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(200)); event.setFieldValue(OrcRecordUpdater.ROW_ID, new LongWritable(300)); @@ -101,8 +101,8 @@ public void testWriter() throws Exception { .filesystem(fs) .bucket(10) .writingBase(false) - .minimumTransactionId(10) - .maximumTransactionId(19) + .minimumWriteId(10) + .maximumWriteId(19) .inspector(inspector) .reporter(Reporter.NULL) .finalDestination(root); @@ -210,8 +210,8 @@ public void testWriterTblProperties() throws Exception { .filesystem(fs) .bucket(10) .writingBase(false) - .minimumTransactionId(10) - .maximumTransactionId(19) + .minimumWriteId(10) + .maximumWriteId(19) .inspector(inspector) .reporter(Reporter.NULL) .finalDestination(root) @@ -252,8 +252,8 @@ public void testUpdates() throws Exception { .filesystem(fs) .bucket(bucket) .writingBase(false) - .minimumTransactionId(100) - .maximumTransactionId(100) + .minimumWriteId(100) + .maximumWriteId(100) .inspector(inspector) .reporter(Reporter.NULL) .recordIdColumn(1) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java index 65508f4..c82a088 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import 
org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -117,8 +117,8 @@ public void setup() throws Exception { .filesystem(fs) .bucket(bucket) .writingBase(false) - .minimumTransactionId(1) - .maximumTransactionId(NUM_OTID) + .minimumWriteId(1) + .maximumWriteId(NUM_OTID) .inspector(inspector) .reporter(Reporter.NULL) .recordIdColumn(1) @@ -141,7 +141,7 @@ public void setup() throws Exception { // Create a delete delta that has rowIds divisible by 2 but not by 3. This will produce // a delete delta file with 50,000 delete events. long currTxnId = NUM_OTID + 1; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -154,7 +154,7 @@ public void setup() throws Exception { // Now, create a delete delta that has rowIds divisible by 3 but not by 2. This will produce // a delete delta file with 25,000 delete events. currTxnId = NUM_OTID + 2; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -167,7 +167,7 @@ public void setup() throws Exception { // Now, create a delete delta that has rowIds divisible by both 3 and 2. This will produce // a delete delta file with 25,000 delete events. 
currTxnId = NUM_OTID + 3; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -216,7 +216,7 @@ private void testVectorizedOrcAcidRowBatchReader(String deleteEventRegistry) thr List splits = getSplits(); // Mark one of the transactions as an exception to test that invalid transactions // are being handled properly. - conf.set(ValidTxnList.VALID_TXNS_KEY, "14:1:1:5"); // Exclude transaction 5 + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:14:1:1:5"); // Exclude transaction 5 VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, new VectorizedRowBatchCtx()); if (deleteEventRegistry.equals(ColumnizedDeleteEventRegistry.class.getName())) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java index 3c007a7..a40ad24 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java @@ -164,7 +164,7 @@ public void testMergePathValidMoveWorkReturnsNewMoveWork() { TableDesc tableDesc = new TableDesc(); reset(mockWork); when(mockWork.getLoadTableWork()).thenReturn(new LoadTableDesc( - condOutputPath, tableDesc, null, null)); + condOutputPath, tableDesc, null)); newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState); assertNotNull(newWork); assertNotEquals(newWork, mockWork); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index c337fd5..3117faa 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -24,11 +24,15 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -41,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -69,6 +74,7 @@ import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -140,6 +146,7 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, boolean isTemporary) throws TException { Table table = new Table(); + table.setTableType(TableType.MANAGED_TABLE.name()); table.setTableName(tableName); table.setDbName(dbName); 
table.setOwner("me"); @@ -150,6 +157,16 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, table.setPartitionKeys(partKeys); } + // Set the table as transactional for compaction to work + if (parameters == null) { + parameters = new HashMap<>(); + } + parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); + if (sortCols != null) { + // Sort columns are not allowed for full ACID table. So, change it to insert-only table + parameters.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, + TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY); + } table.setParameters(parameters); if (isTemporary) table.setTemporary(true); @@ -181,6 +198,12 @@ protected long openTxn() throws MetaException { return txns.get(0); } + protected long allocateWriteId(String dbName, String tblName, long txnid) throws MetaException, TxnAbortedException, NoSuchTxnException { + AllocateTableWriteIdRequest awiRqst = new AllocateTableWriteIdRequest(Collections.singletonList(txnid), dbName, tblName); + AllocateTableWriteIdResponse awiResp = txnHandler.allocateTableWriteId(awiRqst); + return awiResp.getTxnToWriteIds().get(0).getWriteId(); + } + protected void addDeltaFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords) throws Exception { addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, 2, true); @@ -220,15 +243,19 @@ protected void addBaseFile(Table t, Partition p, long maxTxn, int numRecords, in return paths; } - protected void burnThroughTransactions(int num) + protected void burnThroughTransactions(String dbName, String tblName, int num) throws MetaException, NoSuchTxnException, TxnAbortedException { - burnThroughTransactions(num, null, null); + burnThroughTransactions(dbName, tblName, num, null, null); } - protected void burnThroughTransactions(int num, Set open, Set aborted) + protected void burnThroughTransactions(String dbName, String tblName, int num, Set open, Set aborted) throws MetaException, 
NoSuchTxnException, TxnAbortedException { OpenTxnsResponse rsp = txnHandler.openTxns(new OpenTxnRequest(num, "me", "localhost")); + AllocateTableWriteIdRequest awiRqst = new AllocateTableWriteIdRequest(rsp.getTxn_ids(), dbName, tblName); + AllocateTableWriteIdResponse awiResp = txnHandler.allocateTableWriteId(awiRqst); + int i = 0; for (long tid : rsp.getTxn_ids()) { + assert(awiResp.getTxnToWriteIds().get(i++).getTxnId() == tid); if (aborted != null && aborted.contains(tid)) { txnHandler.abortTxn(new AbortTxnRequest(tid)); } else if (open == null || (open != null && !open.contains(tid))) { @@ -350,7 +377,7 @@ private void addFile(Table t, Partition p, long minTxn, long maxTxn, @Override public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path... deltaDirectory) throws IOException { List filesToRead = new ArrayList(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index db8e46c..3ca073c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -76,7 +76,7 @@ public void cleanupAfterMajorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "camtc", 25); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -107,7 +107,7 @@ public void cleanupAfterMajorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "campc", 25); CompactionRequest rqst = new CompactionRequest("default", "campc", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -138,7 
+138,7 @@ public void cleanupAfterMinorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "camitc", 25); CompactionRequest rqst = new CompactionRequest("default", "camitc", CompactionType.MINOR); txnHandler.compact(rqst); @@ -176,7 +176,7 @@ public void cleanupAfterMinorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "camipc", 25); CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -214,7 +214,7 @@ public void blockedByLockTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -251,7 +251,7 @@ public void blockedByLockPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblp", 25); CompactionRequest rqst = new CompactionRequest("default", "bblp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -296,7 +296,7 @@ public void notBlockedBySubsequentLock() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -368,7 +368,7 @@ public void partitionNotBlockedBySubsequentLock() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); 
CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -439,7 +439,7 @@ public void cleanupAfterMajorPartitionCompactionNoBase() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "campcnb", 25); CompactionRequest rqst = new CompactionRequest("default", "campcnb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -469,7 +469,7 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "dt", 25); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -477,14 +477,14 @@ public void droppedTable() throws Exception { txnHandler.markCompacted(ci); txnHandler.setRunAs(ci.id, System.getProperty("user.name")); + // Drop table will clean the table entry from the compaction queue and hence the cleaner has no effect ms.dropTable("default", "dt"); startCleaner(); // Check there are no compaction requests left. 
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); - Assert.assertEquals(1, rsp.getCompactsSize()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, rsp.getCompactsSize()); } @Test @@ -496,7 +496,7 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -505,14 +505,14 @@ public void droppedPartition() throws Exception { txnHandler.markCompacted(ci); txnHandler.setRunAs(ci.id, System.getProperty("user.name")); + // Drop partition will clean the partition entry from the compaction queue and hence cleaner have no effect ms.dropPartition("default", "dp", Collections.singletonList("today"), true); startCleaner(); // Check there are no compactions requests left. 
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); - Assert.assertEquals(1, rsp.getCompactsSize()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, rsp.getCompactsSize()); } @Override boolean useHive130DeltaDirName() { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java index f35826e..35dc002 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java @@ -339,7 +339,7 @@ public void compactTableHighDeltaPct() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "cthdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -350,6 +350,8 @@ public void compactTableHighDeltaPct() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cthdp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -371,7 +373,7 @@ public void compactPartitionHighDeltaPct() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "cphdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -383,6 +385,8 @@ public void compactPartitionHighDeltaPct() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cphdp", txnid); + 
assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -404,7 +408,7 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(53); + burnThroughTransactions("default", "nctdpnhe", 53); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -415,6 +419,8 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctdpnhe", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -440,7 +446,7 @@ public void compactTableTooManyDeltas() throws Exception { addDeltaFile(t, null, 210L, 210L, 1); addDeltaFile(t, null, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "cttmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -451,6 +457,8 @@ public void compactTableTooManyDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cttmd", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -481,7 +489,7 @@ public void compactPartitionTooManyDeltas() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "cptmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -493,6 +501,8 @@ public void compactPartitionTooManyDeltas() throws Exception { LockRequest 
req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cptmd", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -514,7 +524,7 @@ public void noCompactTableNotEnoughDeltas() throws Exception { addDeltaFile(t, null, 201L, 205L, 5); addDeltaFile(t, null, 206L, 211L, 6); - burnThroughTransactions(210); + burnThroughTransactions("default", "nctned", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -525,6 +535,8 @@ public void noCompactTableNotEnoughDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctned", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -550,7 +562,7 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { addDeltaFile(t, null, 300L, 310L, 11); addDeltaFile(t, null, 311L, 321L, 11); - burnThroughTransactions(320); + burnThroughTransactions("default", "cmomwbv", 320); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -561,6 +573,8 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cmomwbv", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -590,7 +604,7 @@ public void enoughDeltasNoBase() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "ednb", 210); long txnid = openTxn(); 
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -602,6 +616,8 @@ public void enoughDeltasNoBase() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "ednb", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -624,7 +640,7 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "ttospgocr", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -636,6 +652,8 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "ttospgocr", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); txnid = openTxn(); @@ -648,6 +666,8 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); res = txnHandler.lock(req); + writeid = allocateWriteId("default", "ttospgocr", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -670,7 +690,7 @@ public void noCompactTableDynamicPartitioning() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "nctdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -681,6 +701,8 @@ public void noCompactTableDynamicPartitioning() throws 
Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctdp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -698,7 +720,7 @@ public void dropTable() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "dt", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -709,6 +731,8 @@ public void dropTable() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "dt", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); ms.dropTable("default", "dt"); @@ -729,7 +753,7 @@ public void dropPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "dp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -741,6 +765,8 @@ public void dropPartition() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "dp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); ms.dropPartition("default", "dp", Collections.singletonList("today"), true); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java index 0638126..9d3a79f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java +++ 
b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java @@ -236,7 +236,7 @@ public void sortedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "st", 25); CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR); txnHandler.compact(rqst); @@ -262,7 +262,7 @@ public void sortedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "sp", 25); CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -285,7 +285,7 @@ public void minorTableWithBase() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mtwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -344,7 +344,7 @@ public void minorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -380,7 +380,7 @@ public void minorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions("default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ 
-416,7 +416,7 @@ public void minorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mpwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mpwb", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -469,7 +469,7 @@ public void minorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions(5); + burnThroughTransactions("default", "mtnb", 5); CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -522,7 +522,7 @@ public void majorTableWithBase() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "matwb", 25); CompactionRequest rqst = new CompactionRequest("default", "matwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -598,7 +598,7 @@ private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception { * and then the 'requested' * minor compaction to combine delta_21_23, delta_25_33 and delta_35_35 to make delta_21_35 * or major compaction to create base_35*/ - burnThroughTransactions(35); + burnThroughTransactions("default", "mapwb", 35); CompactionRequest rqst = new CompactionRequest("default", "mapwb", type); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); @@ -690,7 +690,7 @@ public void majorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mapwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -734,7 +734,7 @@ public void majorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - 
burnThroughTransactions(4); + burnThroughTransactions("default", "matnb", 4); CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -778,7 +778,7 @@ public void majorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "matl", 25); CompactionRequest rqst = new CompactionRequest("default", "matl", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -822,7 +822,7 @@ public void minorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mtl", 25); CompactionRequest rqst = new CompactionRequest("default", "mtl", CompactionType.MINOR); txnHandler.compact(rqst); @@ -865,7 +865,7 @@ public void majorPartitionWithBaseMissingBuckets() throws Exception { addDeltaFile(t, p, 21L, 22L, 2, 2, false); addDeltaFile(t, p, 23L, 26L, 4); - burnThroughTransactions(27); + burnThroughTransactions("default", "mapwbmb", 27); CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -919,7 +919,7 @@ public void majorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -955,7 +955,7 @@ public void majorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions("default", 
"mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -991,19 +991,19 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions(4); + burnThroughTransactions("default", "dt", 4); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR); txnHandler.compact(rqst); + // Drop table will clean the table entry from the compaction queue and hence worker have no effect ms.dropTable("default", "dt"); startWorker(); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); List compacts = rsp.getCompacts(); - Assert.assertEquals(1, compacts.size()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(compacts.get(0).getState())); + Assert.assertEquals(0, compacts.size()); } @Test @@ -1015,20 +1015,20 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); + // Drop partition will clean the partition entry from the compaction queue and hence worker have no effect ms.dropPartition("default", "dp", Collections.singletonList("today"), true); startWorker(); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); List compacts = rsp.getCompacts(); - Assert.assertEquals(1, compacts.size()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, compacts.size()); } @After diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index af0fd6b..d97b05b 100644 --- 
a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1049; - ::apache::thrift::protocol::TType _etype1052; - xfer += iprot->readListBegin(_etype1052, _size1049); - this->success.resize(_size1049); - uint32_t _i1053; - for (_i1053 = 0; _i1053 < _size1049; ++_i1053) + uint32_t _size1091; + ::apache::thrift::protocol::TType _etype1094; + xfer += iprot->readListBegin(_etype1094, _size1091); + this->success.resize(_size1091); + uint32_t _i1095; + for (_i1095 = 0; _i1095 < _size1091; ++_i1095) { - xfer += iprot->readString(this->success[_i1053]); + xfer += iprot->readString(this->success[_i1095]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1054; - for (_iter1054 = this->success.begin(); _iter1054 != this->success.end(); ++_iter1054) + std::vector ::const_iterator _iter1096; + for (_iter1096 = this->success.begin(); _iter1096 != this->success.end(); ++_iter1096) { - xfer += oprot->writeString((*_iter1054)); + xfer += oprot->writeString((*_iter1096)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1055; - ::apache::thrift::protocol::TType _etype1058; - xfer += iprot->readListBegin(_etype1058, _size1055); - (*(this->success)).resize(_size1055); - uint32_t _i1059; - for (_i1059 = 0; _i1059 < 
_size1055; ++_i1059) + uint32_t _size1097; + ::apache::thrift::protocol::TType _etype1100; + xfer += iprot->readListBegin(_etype1100, _size1097); + (*(this->success)).resize(_size1097); + uint32_t _i1101; + for (_i1101 = 0; _i1101 < _size1097; ++_i1101) { - xfer += iprot->readString((*(this->success))[_i1059]); + xfer += iprot->readString((*(this->success))[_i1101]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1060; - ::apache::thrift::protocol::TType _etype1063; - xfer += iprot->readListBegin(_etype1063, _size1060); - this->success.resize(_size1060); - uint32_t _i1064; - for (_i1064 = 0; _i1064 < _size1060; ++_i1064) + uint32_t _size1102; + ::apache::thrift::protocol::TType _etype1105; + xfer += iprot->readListBegin(_etype1105, _size1102); + this->success.resize(_size1102); + uint32_t _i1106; + for (_i1106 = 0; _i1106 < _size1102; ++_i1106) { - xfer += iprot->readString(this->success[_i1064]); + xfer += iprot->readString(this->success[_i1106]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1065; - for (_iter1065 = this->success.begin(); _iter1065 != this->success.end(); ++_iter1065) + std::vector ::const_iterator _iter1107; + for (_iter1107 = this->success.begin(); _iter1107 != this->success.end(); ++_iter1107) { - xfer += oprot->writeString((*_iter1065)); + xfer += oprot->writeString((*_iter1107)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == 
::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1066; - ::apache::thrift::protocol::TType _etype1069; - xfer += iprot->readListBegin(_etype1069, _size1066); - (*(this->success)).resize(_size1066); - uint32_t _i1070; - for (_i1070 = 0; _i1070 < _size1066; ++_i1070) + uint32_t _size1108; + ::apache::thrift::protocol::TType _etype1111; + xfer += iprot->readListBegin(_etype1111, _size1108); + (*(this->success)).resize(_size1108); + uint32_t _i1112; + for (_i1112 = 0; _i1112 < _size1108; ++_i1112) { - xfer += iprot->readString((*(this->success))[_i1070]); + xfer += iprot->readString((*(this->success))[_i1112]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1071; - ::apache::thrift::protocol::TType _ktype1072; - ::apache::thrift::protocol::TType _vtype1073; - xfer += iprot->readMapBegin(_ktype1072, _vtype1073, _size1071); - uint32_t _i1075; - for (_i1075 = 0; _i1075 < _size1071; ++_i1075) + uint32_t _size1113; + ::apache::thrift::protocol::TType _ktype1114; + ::apache::thrift::protocol::TType _vtype1115; + xfer += iprot->readMapBegin(_ktype1114, _vtype1115, _size1113); + uint32_t _i1117; + for (_i1117 = 0; _i1117 < _size1113; ++_i1117) { - std::string _key1076; - xfer += iprot->readString(_key1076); - Type& _val1077 = this->success[_key1076]; - xfer += _val1077.read(iprot); + std::string _key1118; + xfer += iprot->readString(_key1118); + Type& _val1119 = this->success[_key1118]; + xfer += _val1119.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, 
static_cast(this->success.size())); - std::map ::const_iterator _iter1078; - for (_iter1078 = this->success.begin(); _iter1078 != this->success.end(); ++_iter1078) + std::map ::const_iterator _iter1120; + for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120) { - xfer += oprot->writeString(_iter1078->first); - xfer += _iter1078->second.write(oprot); + xfer += oprot->writeString(_iter1120->first); + xfer += _iter1120->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1079; - ::apache::thrift::protocol::TType _ktype1080; - ::apache::thrift::protocol::TType _vtype1081; - xfer += iprot->readMapBegin(_ktype1080, _vtype1081, _size1079); - uint32_t _i1083; - for (_i1083 = 0; _i1083 < _size1079; ++_i1083) + uint32_t _size1121; + ::apache::thrift::protocol::TType _ktype1122; + ::apache::thrift::protocol::TType _vtype1123; + xfer += iprot->readMapBegin(_ktype1122, _vtype1123, _size1121); + uint32_t _i1125; + for (_i1125 = 0; _i1125 < _size1121; ++_i1125) { - std::string _key1084; - xfer += iprot->readString(_key1084); - Type& _val1085 = (*(this->success))[_key1084]; - xfer += _val1085.read(iprot); + std::string _key1126; + xfer += iprot->readString(_key1126); + Type& _val1127 = (*(this->success))[_key1126]; + xfer += _val1127.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1086; - ::apache::thrift::protocol::TType _etype1089; - xfer += iprot->readListBegin(_etype1089, _size1086); - this->success.resize(_size1086); - uint32_t _i1090; - for (_i1090 = 0; _i1090 < _size1086; ++_i1090) + uint32_t _size1128; + ::apache::thrift::protocol::TType _etype1131; 
+ xfer += iprot->readListBegin(_etype1131, _size1128); + this->success.resize(_size1128); + uint32_t _i1132; + for (_i1132 = 0; _i1132 < _size1128; ++_i1132) { - xfer += this->success[_i1090].read(iprot); + xfer += this->success[_i1132].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1091; - for (_iter1091 = this->success.begin(); _iter1091 != this->success.end(); ++_iter1091) + std::vector ::const_iterator _iter1133; + for (_iter1133 = this->success.begin(); _iter1133 != this->success.end(); ++_iter1133) { - xfer += (*_iter1091).write(oprot); + xfer += (*_iter1133).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1092; - ::apache::thrift::protocol::TType _etype1095; - xfer += iprot->readListBegin(_etype1095, _size1092); - (*(this->success)).resize(_size1092); - uint32_t _i1096; - for (_i1096 = 0; _i1096 < _size1092; ++_i1096) + uint32_t _size1134; + ::apache::thrift::protocol::TType _etype1137; + xfer += iprot->readListBegin(_etype1137, _size1134); + (*(this->success)).resize(_size1134); + uint32_t _i1138; + for (_i1138 = 0; _i1138 < _size1134; ++_i1138) { - xfer += (*(this->success))[_i1096].read(iprot); + xfer += (*(this->success))[_i1138].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1097; - ::apache::thrift::protocol::TType _etype1100; - xfer += 
iprot->readListBegin(_etype1100, _size1097); - this->success.resize(_size1097); - uint32_t _i1101; - for (_i1101 = 0; _i1101 < _size1097; ++_i1101) + uint32_t _size1139; + ::apache::thrift::protocol::TType _etype1142; + xfer += iprot->readListBegin(_etype1142, _size1139); + this->success.resize(_size1139); + uint32_t _i1143; + for (_i1143 = 0; _i1143 < _size1139; ++_i1143) { - xfer += this->success[_i1101].read(iprot); + xfer += this->success[_i1143].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1102; - for (_iter1102 = this->success.begin(); _iter1102 != this->success.end(); ++_iter1102) + std::vector ::const_iterator _iter1144; + for (_iter1144 = this->success.begin(); _iter1144 != this->success.end(); ++_iter1144) { - xfer += (*_iter1102).write(oprot); + xfer += (*_iter1144).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1103; - ::apache::thrift::protocol::TType _etype1106; - xfer += iprot->readListBegin(_etype1106, _size1103); - (*(this->success)).resize(_size1103); - uint32_t _i1107; - for (_i1107 = 0; _i1107 < _size1103; ++_i1107) + uint32_t _size1145; + ::apache::thrift::protocol::TType _etype1148; + xfer += iprot->readListBegin(_etype1148, _size1145); + (*(this->success)).resize(_size1145); + uint32_t _i1149; + for (_i1149 = 0; _i1149 < _size1145; ++_i1149) { - xfer += (*(this->success))[_i1107].read(iprot); + xfer += (*(this->success))[_i1149].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t 
ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1108; - ::apache::thrift::protocol::TType _etype1111; - xfer += iprot->readListBegin(_etype1111, _size1108); - this->success.resize(_size1108); - uint32_t _i1112; - for (_i1112 = 0; _i1112 < _size1108; ++_i1112) + uint32_t _size1150; + ::apache::thrift::protocol::TType _etype1153; + xfer += iprot->readListBegin(_etype1153, _size1150); + this->success.resize(_size1150); + uint32_t _i1154; + for (_i1154 = 0; _i1154 < _size1150; ++_i1154) { - xfer += this->success[_i1112].read(iprot); + xfer += this->success[_i1154].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1113; - for (_iter1113 = this->success.begin(); _iter1113 != this->success.end(); ++_iter1113) + std::vector ::const_iterator _iter1155; + for (_iter1155 = this->success.begin(); _iter1155 != this->success.end(); ++_iter1155) { - xfer += (*_iter1113).write(oprot); + xfer += (*_iter1155).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1114; - ::apache::thrift::protocol::TType _etype1117; - xfer += iprot->readListBegin(_etype1117, _size1114); - (*(this->success)).resize(_size1114); - uint32_t _i1118; - for (_i1118 = 0; _i1118 < _size1114; ++_i1118) + uint32_t _size1156; + ::apache::thrift::protocol::TType _etype1159; + xfer += iprot->readListBegin(_etype1159, _size1156); + (*(this->success)).resize(_size1156); + uint32_t _i1160; 
+ for (_i1160 = 0; _i1160 < _size1156; ++_i1160) { - xfer += (*(this->success))[_i1118].read(iprot); + xfer += (*(this->success))[_i1160].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1119; - ::apache::thrift::protocol::TType _etype1122; - xfer += iprot->readListBegin(_etype1122, _size1119); - this->success.resize(_size1119); - uint32_t _i1123; - for (_i1123 = 0; _i1123 < _size1119; ++_i1123) + uint32_t _size1161; + ::apache::thrift::protocol::TType _etype1164; + xfer += iprot->readListBegin(_etype1164, _size1161); + this->success.resize(_size1161); + uint32_t _i1165; + for (_i1165 = 0; _i1165 < _size1161; ++_i1165) { - xfer += this->success[_i1123].read(iprot); + xfer += this->success[_i1165].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1124; - for (_iter1124 = this->success.begin(); _iter1124 != this->success.end(); ++_iter1124) + std::vector ::const_iterator _iter1166; + for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166) { - xfer += (*_iter1124).write(oprot); + xfer += (*_iter1166).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1125; - ::apache::thrift::protocol::TType _etype1128; - xfer += iprot->readListBegin(_etype1128, _size1125); - (*(this->success)).resize(_size1125); - uint32_t _i1129; - for (_i1129 = 
0; _i1129 < _size1125; ++_i1129) + uint32_t _size1167; + ::apache::thrift::protocol::TType _etype1170; + xfer += iprot->readListBegin(_etype1170, _size1167); + (*(this->success)).resize(_size1167); + uint32_t _i1171; + for (_i1171 = 0; _i1171 < _size1167; ++_i1171) { - xfer += (*(this->success))[_i1129].read(iprot); + xfer += (*(this->success))[_i1171].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1130; - ::apache::thrift::protocol::TType _etype1133; - xfer += iprot->readListBegin(_etype1133, _size1130); - this->primaryKeys.resize(_size1130); - uint32_t _i1134; - for (_i1134 = 0; _i1134 < _size1130; ++_i1134) + uint32_t _size1172; + ::apache::thrift::protocol::TType _etype1175; + xfer += iprot->readListBegin(_etype1175, _size1172); + this->primaryKeys.resize(_size1172); + uint32_t _i1176; + for (_i1176 = 0; _i1176 < _size1172; ++_i1176) { - xfer += this->primaryKeys[_i1134].read(iprot); + xfer += this->primaryKeys[_i1176].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1135; - ::apache::thrift::protocol::TType _etype1138; - xfer += iprot->readListBegin(_etype1138, _size1135); - this->foreignKeys.resize(_size1135); - uint32_t _i1139; - for (_i1139 = 0; _i1139 < _size1135; ++_i1139) + uint32_t _size1177; + ::apache::thrift::protocol::TType _etype1180; + xfer += iprot->readListBegin(_etype1180, _size1177); + this->foreignKeys.resize(_size1177); + uint32_t _i1181; + for (_i1181 = 0; _i1181 < _size1177; ++_i1181) { - xfer += this->foreignKeys[_i1139].read(iprot); + xfer += this->foreignKeys[_i1181].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +4558,14 @@ 
uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1140; - ::apache::thrift::protocol::TType _etype1143; - xfer += iprot->readListBegin(_etype1143, _size1140); - this->uniqueConstraints.resize(_size1140); - uint32_t _i1144; - for (_i1144 = 0; _i1144 < _size1140; ++_i1144) + uint32_t _size1182; + ::apache::thrift::protocol::TType _etype1185; + xfer += iprot->readListBegin(_etype1185, _size1182); + this->uniqueConstraints.resize(_size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < _size1182; ++_i1186) { - xfer += this->uniqueConstraints[_i1144].read(iprot); + xfer += this->uniqueConstraints[_i1186].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1145; - ::apache::thrift::protocol::TType _etype1148; - xfer += iprot->readListBegin(_etype1148, _size1145); - this->notNullConstraints.resize(_size1145); - uint32_t _i1149; - for (_i1149 = 0; _i1149 < _size1145; ++_i1149) + uint32_t _size1187; + ::apache::thrift::protocol::TType _etype1190; + xfer += iprot->readListBegin(_etype1190, _size1187); + this->notNullConstraints.resize(_size1187); + uint32_t _i1191; + for (_i1191 = 0; _i1191 < _size1187; ++_i1191) { - xfer += this->notNullConstraints[_i1149].read(iprot); + xfer += this->notNullConstraints[_i1191].read(iprot); } xfer += iprot->readListEnd(); } @@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1150; - for (_iter1150 = this->primaryKeys.begin(); 
_iter1150 != this->primaryKeys.end(); ++_iter1150) + std::vector ::const_iterator _iter1192; + for (_iter1192 = this->primaryKeys.begin(); _iter1192 != this->primaryKeys.end(); ++_iter1192) { - xfer += (*_iter1150).write(oprot); + xfer += (*_iter1192).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1151; - for (_iter1151 = this->foreignKeys.begin(); _iter1151 != this->foreignKeys.end(); ++_iter1151) + std::vector ::const_iterator _iter1193; + for (_iter1193 = this->foreignKeys.begin(); _iter1193 != this->foreignKeys.end(); ++_iter1193) { - xfer += (*_iter1151).write(oprot); + xfer += (*_iter1193).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1152; - for (_iter1152 = this->uniqueConstraints.begin(); _iter1152 != this->uniqueConstraints.end(); ++_iter1152) + std::vector ::const_iterator _iter1194; + for (_iter1194 = this->uniqueConstraints.begin(); _iter1194 != this->uniqueConstraints.end(); ++_iter1194) { - xfer += (*_iter1152).write(oprot); + xfer += (*_iter1194).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1153; - for (_iter1153 = this->notNullConstraints.begin(); _iter1153 != this->notNullConstraints.end(); ++_iter1153) + std::vector ::const_iterator _iter1195; + for (_iter1195 = this->notNullConstraints.begin(); _iter1195 != this->notNullConstraints.end(); ++_iter1195) { - xfer += (*_iter1153).write(oprot); + xfer += (*_iter1195).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1154; - for (_iter1154 = (*(this->primaryKeys)).begin(); _iter1154 != (*(this->primaryKeys)).end(); ++_iter1154) + std::vector ::const_iterator _iter1196; + for (_iter1196 = (*(this->primaryKeys)).begin(); _iter1196 != (*(this->primaryKeys)).end(); ++_iter1196) { - xfer += (*_iter1154).write(oprot); + xfer += (*_iter1196).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1155; - for (_iter1155 = (*(this->foreignKeys)).begin(); _iter1155 != (*(this->foreignKeys)).end(); ++_iter1155) + std::vector ::const_iterator _iter1197; + for (_iter1197 = (*(this->foreignKeys)).begin(); _iter1197 != (*(this->foreignKeys)).end(); ++_iter1197) { - xfer += (*_iter1155).write(oprot); + xfer += (*_iter1197).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache 
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1156; - for (_iter1156 = (*(this->uniqueConstraints)).begin(); _iter1156 != (*(this->uniqueConstraints)).end(); ++_iter1156) + std::vector ::const_iterator _iter1198; + for (_iter1198 = (*(this->uniqueConstraints)).begin(); _iter1198 != (*(this->uniqueConstraints)).end(); ++_iter1198) { - xfer += (*_iter1156).write(oprot); + xfer += (*_iter1198).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1157; - for (_iter1157 = (*(this->notNullConstraints)).begin(); _iter1157 != (*(this->notNullConstraints)).end(); ++_iter1157) + std::vector ::const_iterator _iter1199; + for (_iter1199 = (*(this->notNullConstraints)).begin(); _iter1199 != (*(this->notNullConstraints)).end(); ++_iter1199) { - xfer += (*_iter1157).write(oprot); + xfer += (*_iter1199).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1158; - ::apache::thrift::protocol::TType _etype1161; - xfer += iprot->readListBegin(_etype1161, _size1158); - this->partNames.resize(_size1158); - uint32_t _i1162; - for (_i1162 = 0; _i1162 < _size1158; ++_i1162) + uint32_t _size1200; + ::apache::thrift::protocol::TType _etype1203; + xfer += iprot->readListBegin(_etype1203, _size1200); + this->partNames.resize(_size1200); + uint32_t _i1204; + 
for (_i1204 = 0; _i1204 < _size1200; ++_i1204) { - xfer += iprot->readString(this->partNames[_i1162]); + xfer += iprot->readString(this->partNames[_i1204]); } xfer += iprot->readListEnd(); } @@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1163; - for (_iter1163 = this->partNames.begin(); _iter1163 != this->partNames.end(); ++_iter1163) + std::vector ::const_iterator _iter1205; + for (_iter1205 = this->partNames.begin(); _iter1205 != this->partNames.end(); ++_iter1205) { - xfer += oprot->writeString((*_iter1163)); + xfer += oprot->writeString((*_iter1205)); } xfer += oprot->writeListEnd(); } @@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1164; - for (_iter1164 = (*(this->partNames)).begin(); _iter1164 != (*(this->partNames)).end(); ++_iter1164) + std::vector ::const_iterator _iter1206; + for (_iter1206 = (*(this->partNames)).begin(); _iter1206 != (*(this->partNames)).end(); ++_iter1206) { - xfer += oprot->writeString((*_iter1164)); + xfer += oprot->writeString((*_iter1206)); } xfer += oprot->writeListEnd(); } @@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1165; - ::apache::thrift::protocol::TType _etype1168; - xfer += iprot->readListBegin(_etype1168, _size1165); - this->success.resize(_size1165); - uint32_t _i1169; - for (_i1169 = 0; _i1169 < _size1165; 
++_i1169) + uint32_t _size1207; + ::apache::thrift::protocol::TType _etype1210; + xfer += iprot->readListBegin(_etype1210, _size1207); + this->success.resize(_size1207); + uint32_t _i1211; + for (_i1211 = 0; _i1211 < _size1207; ++_i1211) { - xfer += iprot->readString(this->success[_i1169]); + xfer += iprot->readString(this->success[_i1211]); } xfer += iprot->readListEnd(); } @@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1170; - for (_iter1170 = this->success.begin(); _iter1170 != this->success.end(); ++_iter1170) + std::vector ::const_iterator _iter1212; + for (_iter1212 = this->success.begin(); _iter1212 != this->success.end(); ++_iter1212) { - xfer += oprot->writeString((*_iter1170)); + xfer += oprot->writeString((*_iter1212)); } xfer += oprot->writeListEnd(); } @@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1171; - ::apache::thrift::protocol::TType _etype1174; - xfer += iprot->readListBegin(_etype1174, _size1171); - (*(this->success)).resize(_size1171); - uint32_t _i1175; - for (_i1175 = 0; _i1175 < _size1171; ++_i1175) + uint32_t _size1213; + ::apache::thrift::protocol::TType _etype1216; + xfer += iprot->readListBegin(_etype1216, _size1213); + (*(this->success)).resize(_size1213); + uint32_t _i1217; + for (_i1217 = 0; _i1217 < _size1213; ++_i1217) { - xfer += iprot->readString((*(this->success))[_i1175]); + xfer += iprot->readString((*(this->success))[_i1217]); } xfer += iprot->readListEnd(); } @@ -7075,14 +7075,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1176; - ::apache::thrift::protocol::TType _etype1179; - xfer += iprot->readListBegin(_etype1179, _size1176); - this->success.resize(_size1176); - uint32_t _i1180; - for (_i1180 = 0; _i1180 < _size1176; ++_i1180) + uint32_t _size1218; + ::apache::thrift::protocol::TType _etype1221; + xfer += iprot->readListBegin(_etype1221, _size1218); + this->success.resize(_size1218); + uint32_t _i1222; + for (_i1222 = 0; _i1222 < _size1218; ++_i1222) { - xfer += iprot->readString(this->success[_i1180]); + xfer += iprot->readString(this->success[_i1222]); } xfer += iprot->readListEnd(); } @@ -7121,10 +7121,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1181; - for (_iter1181 = this->success.begin(); _iter1181 != this->success.end(); ++_iter1181) + std::vector ::const_iterator _iter1223; + for (_iter1223 = this->success.begin(); _iter1223 != this->success.end(); ++_iter1223) { - xfer += oprot->writeString((*_iter1181)); + xfer += oprot->writeString((*_iter1223)); } xfer += oprot->writeListEnd(); } @@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1182; - ::apache::thrift::protocol::TType _etype1185; - xfer += iprot->readListBegin(_etype1185, _size1182); - (*(this->success)).resize(_size1182); - uint32_t _i1186; - for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + uint32_t _size1224; + ::apache::thrift::protocol::TType _etype1227; + xfer += iprot->readListBegin(_etype1227, _size1224); + (*(this->success)).resize(_size1224); + uint32_t _i1228; + for (_i1228 = 0; _i1228 < _size1224; ++_i1228) { - xfer 
+= iprot->readString((*(this->success))[_i1186]); + xfer += iprot->readString((*(this->success))[_i1228]); } xfer += iprot->readListEnd(); } @@ -7314,14 +7314,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1187; - ::apache::thrift::protocol::TType _etype1190; - xfer += iprot->readListBegin(_etype1190, _size1187); - this->success.resize(_size1187); - uint32_t _i1191; - for (_i1191 = 0; _i1191 < _size1187; ++_i1191) + uint32_t _size1229; + ::apache::thrift::protocol::TType _etype1232; + xfer += iprot->readListBegin(_etype1232, _size1229); + this->success.resize(_size1229); + uint32_t _i1233; + for (_i1233 = 0; _i1233 < _size1229; ++_i1233) { - xfer += iprot->readString(this->success[_i1191]); + xfer += iprot->readString(this->success[_i1233]); } xfer += iprot->readListEnd(); } @@ -7360,10 +7360,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1192; - for (_iter1192 = this->success.begin(); _iter1192 != this->success.end(); ++_iter1192) + std::vector ::const_iterator _iter1234; + for (_iter1234 = this->success.begin(); _iter1234 != this->success.end(); ++_iter1234) { - xfer += oprot->writeString((*_iter1192)); + xfer += oprot->writeString((*_iter1234)); } xfer += oprot->writeListEnd(); } @@ -7408,14 +7408,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1193; - ::apache::thrift::protocol::TType _etype1196; - xfer += iprot->readListBegin(_etype1196, _size1193); - (*(this->success)).resize(_size1193); - uint32_t _i1197; - for (_i1197 = 0; _i1197 < 
_size1193; ++_i1197) + uint32_t _size1235; + ::apache::thrift::protocol::TType _etype1238; + xfer += iprot->readListBegin(_etype1238, _size1235); + (*(this->success)).resize(_size1235); + uint32_t _i1239; + for (_i1239 = 0; _i1239 < _size1235; ++_i1239) { - xfer += iprot->readString((*(this->success))[_i1197]); + xfer += iprot->readString((*(this->success))[_i1239]); } xfer += iprot->readListEnd(); } @@ -7490,14 +7490,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1198; - ::apache::thrift::protocol::TType _etype1201; - xfer += iprot->readListBegin(_etype1201, _size1198); - this->tbl_types.resize(_size1198); - uint32_t _i1202; - for (_i1202 = 0; _i1202 < _size1198; ++_i1202) + uint32_t _size1240; + ::apache::thrift::protocol::TType _etype1243; + xfer += iprot->readListBegin(_etype1243, _size1240); + this->tbl_types.resize(_size1240); + uint32_t _i1244; + for (_i1244 = 0; _i1244 < _size1240; ++_i1244) { - xfer += iprot->readString(this->tbl_types[_i1202]); + xfer += iprot->readString(this->tbl_types[_i1244]); } xfer += iprot->readListEnd(); } @@ -7534,10 +7534,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1203; - for (_iter1203 = this->tbl_types.begin(); _iter1203 != this->tbl_types.end(); ++_iter1203) + std::vector ::const_iterator _iter1245; + for (_iter1245 = this->tbl_types.begin(); _iter1245 != this->tbl_types.end(); ++_iter1245) { - xfer += oprot->writeString((*_iter1203)); + xfer += oprot->writeString((*_iter1245)); } xfer += oprot->writeListEnd(); } @@ -7569,10 +7569,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += 
oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1204; - for (_iter1204 = (*(this->tbl_types)).begin(); _iter1204 != (*(this->tbl_types)).end(); ++_iter1204) + std::vector ::const_iterator _iter1246; + for (_iter1246 = (*(this->tbl_types)).begin(); _iter1246 != (*(this->tbl_types)).end(); ++_iter1246) { - xfer += oprot->writeString((*_iter1204)); + xfer += oprot->writeString((*_iter1246)); } xfer += oprot->writeListEnd(); } @@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1205; - ::apache::thrift::protocol::TType _etype1208; - xfer += iprot->readListBegin(_etype1208, _size1205); - this->success.resize(_size1205); - uint32_t _i1209; - for (_i1209 = 0; _i1209 < _size1205; ++_i1209) + uint32_t _size1247; + ::apache::thrift::protocol::TType _etype1250; + xfer += iprot->readListBegin(_etype1250, _size1247); + this->success.resize(_size1247); + uint32_t _i1251; + for (_i1251 = 0; _i1251 < _size1247; ++_i1251) { - xfer += this->success[_i1209].read(iprot); + xfer += this->success[_i1251].read(iprot); } xfer += iprot->readListEnd(); } @@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1210; - for (_iter1210 = this->success.begin(); _iter1210 != this->success.end(); ++_iter1210) + std::vector ::const_iterator _iter1252; + for (_iter1252 = this->success.begin(); _iter1252 != this->success.end(); ++_iter1252) { - xfer += (*_iter1210).write(oprot); + xfer += (*_iter1252).write(oprot); } 
xfer += oprot->writeListEnd(); } @@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1211; - ::apache::thrift::protocol::TType _etype1214; - xfer += iprot->readListBegin(_etype1214, _size1211); - (*(this->success)).resize(_size1211); - uint32_t _i1215; - for (_i1215 = 0; _i1215 < _size1211; ++_i1215) + uint32_t _size1253; + ::apache::thrift::protocol::TType _etype1256; + xfer += iprot->readListBegin(_etype1256, _size1253); + (*(this->success)).resize(_size1253); + uint32_t _i1257; + for (_i1257 = 0; _i1257 < _size1253; ++_i1257) { - xfer += (*(this->success))[_i1215].read(iprot); + xfer += (*(this->success))[_i1257].read(iprot); } xfer += iprot->readListEnd(); } @@ -7852,14 +7852,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1216; - ::apache::thrift::protocol::TType _etype1219; - xfer += iprot->readListBegin(_etype1219, _size1216); - this->success.resize(_size1216); - uint32_t _i1220; - for (_i1220 = 0; _i1220 < _size1216; ++_i1220) + uint32_t _size1258; + ::apache::thrift::protocol::TType _etype1261; + xfer += iprot->readListBegin(_etype1261, _size1258); + this->success.resize(_size1258); + uint32_t _i1262; + for (_i1262 = 0; _i1262 < _size1258; ++_i1262) { - xfer += iprot->readString(this->success[_i1220]); + xfer += iprot->readString(this->success[_i1262]); } xfer += iprot->readListEnd(); } @@ -7898,10 +7898,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1221; - for (_iter1221 = this->success.begin(); _iter1221 != 
this->success.end(); ++_iter1221) + std::vector ::const_iterator _iter1263; + for (_iter1263 = this->success.begin(); _iter1263 != this->success.end(); ++_iter1263) { - xfer += oprot->writeString((*_iter1221)); + xfer += oprot->writeString((*_iter1263)); } xfer += oprot->writeListEnd(); } @@ -7946,14 +7946,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1222; - ::apache::thrift::protocol::TType _etype1225; - xfer += iprot->readListBegin(_etype1225, _size1222); - (*(this->success)).resize(_size1222); - uint32_t _i1226; - for (_i1226 = 0; _i1226 < _size1222; ++_i1226) + uint32_t _size1264; + ::apache::thrift::protocol::TType _etype1267; + xfer += iprot->readListBegin(_etype1267, _size1264); + (*(this->success)).resize(_size1264); + uint32_t _i1268; + for (_i1268 = 0; _i1268 < _size1264; ++_i1268) { - xfer += iprot->readString((*(this->success))[_i1226]); + xfer += iprot->readString((*(this->success))[_i1268]); } xfer += iprot->readListEnd(); } @@ -8263,14 +8263,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1227; - ::apache::thrift::protocol::TType _etype1230; - xfer += iprot->readListBegin(_etype1230, _size1227); - this->tbl_names.resize(_size1227); - uint32_t _i1231; - for (_i1231 = 0; _i1231 < _size1227; ++_i1231) + uint32_t _size1269; + ::apache::thrift::protocol::TType _etype1272; + xfer += iprot->readListBegin(_etype1272, _size1269); + this->tbl_names.resize(_size1269); + uint32_t _i1273; + for (_i1273 = 0; _i1273 < _size1269; ++_i1273) { - xfer += iprot->readString(this->tbl_names[_i1231]); + xfer += iprot->readString(this->tbl_names[_i1273]); } xfer += iprot->readListEnd(); } @@ -8303,10 +8303,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += 
oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1232; - for (_iter1232 = this->tbl_names.begin(); _iter1232 != this->tbl_names.end(); ++_iter1232) + std::vector ::const_iterator _iter1274; + for (_iter1274 = this->tbl_names.begin(); _iter1274 != this->tbl_names.end(); ++_iter1274) { - xfer += oprot->writeString((*_iter1232)); + xfer += oprot->writeString((*_iter1274)); } xfer += oprot->writeListEnd(); } @@ -8334,10 +8334,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1233; - for (_iter1233 = (*(this->tbl_names)).begin(); _iter1233 != (*(this->tbl_names)).end(); ++_iter1233) + std::vector ::const_iterator _iter1275; + for (_iter1275 = (*(this->tbl_names)).begin(); _iter1275 != (*(this->tbl_names)).end(); ++_iter1275) { - xfer += oprot->writeString((*_iter1233)); + xfer += oprot->writeString((*_iter1275)); } xfer += oprot->writeListEnd(); } @@ -8378,14 +8378,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1234; - ::apache::thrift::protocol::TType _etype1237; - xfer += iprot->readListBegin(_etype1237, _size1234); - this->success.resize(_size1234); - uint32_t _i1238; - for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + uint32_t _size1276; + ::apache::thrift::protocol::TType _etype1279; + xfer += iprot->readListBegin(_etype1279, _size1276); + this->success.resize(_size1276); + uint32_t _i1280; + for (_i1280 = 0; _i1280 < _size1276; ++_i1280) { - xfer += this->success[_i1238].read(iprot); + xfer += 
this->success[_i1280].read(iprot); } xfer += iprot->readListEnd(); } @@ -8416,10 +8416,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1239; - for (_iter1239 = this->success.begin(); _iter1239 != this->success.end(); ++_iter1239) + std::vector
::const_iterator _iter1281; + for (_iter1281 = this->success.begin(); _iter1281 != this->success.end(); ++_iter1281) { - xfer += (*_iter1239).write(oprot); + xfer += (*_iter1281).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8460,14 +8460,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1240; - ::apache::thrift::protocol::TType _etype1243; - xfer += iprot->readListBegin(_etype1243, _size1240); - (*(this->success)).resize(_size1240); - uint32_t _i1244; - for (_i1244 = 0; _i1244 < _size1240; ++_i1244) + uint32_t _size1282; + ::apache::thrift::protocol::TType _etype1285; + xfer += iprot->readListBegin(_etype1285, _size1282); + (*(this->success)).resize(_size1282); + uint32_t _i1286; + for (_i1286 = 0; _i1286 < _size1282; ++_i1286) { - xfer += (*(this->success))[_i1244].read(iprot); + xfer += (*(this->success))[_i1286].read(iprot); } xfer += iprot->readListEnd(); } @@ -9000,14 +9000,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1245; - ::apache::thrift::protocol::TType _etype1248; - xfer += iprot->readListBegin(_etype1248, _size1245); - this->tbl_names.resize(_size1245); - uint32_t _i1249; - for (_i1249 = 0; _i1249 < _size1245; ++_i1249) + uint32_t _size1287; + ::apache::thrift::protocol::TType _etype1290; + xfer += iprot->readListBegin(_etype1290, _size1287); + this->tbl_names.resize(_size1287); + uint32_t _i1291; + for (_i1291 = 0; _i1291 < _size1287; ++_i1291) { - xfer += iprot->readString(this->tbl_names[_i1249]); + xfer += iprot->readString(this->tbl_names[_i1291]); } xfer += iprot->readListEnd(); } @@ -9040,10 +9040,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer 
+= oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1250; - for (_iter1250 = this->tbl_names.begin(); _iter1250 != this->tbl_names.end(); ++_iter1250) + std::vector ::const_iterator _iter1292; + for (_iter1292 = this->tbl_names.begin(); _iter1292 != this->tbl_names.end(); ++_iter1292) { - xfer += oprot->writeString((*_iter1250)); + xfer += oprot->writeString((*_iter1292)); } xfer += oprot->writeListEnd(); } @@ -9071,10 +9071,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1251; - for (_iter1251 = (*(this->tbl_names)).begin(); _iter1251 != (*(this->tbl_names)).end(); ++_iter1251) + std::vector ::const_iterator _iter1293; + for (_iter1293 = (*(this->tbl_names)).begin(); _iter1293 != (*(this->tbl_names)).end(); ++_iter1293) { - xfer += oprot->writeString((*_iter1251)); + xfer += oprot->writeString((*_iter1293)); } xfer += oprot->writeListEnd(); } @@ -9115,17 +9115,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1252; - ::apache::thrift::protocol::TType _ktype1253; - ::apache::thrift::protocol::TType _vtype1254; - xfer += iprot->readMapBegin(_ktype1253, _vtype1254, _size1252); - uint32_t _i1256; - for (_i1256 = 0; _i1256 < _size1252; ++_i1256) + uint32_t _size1294; + ::apache::thrift::protocol::TType _ktype1295; + ::apache::thrift::protocol::TType _vtype1296; + xfer += iprot->readMapBegin(_ktype1295, _vtype1296, _size1294); + uint32_t _i1298; + for (_i1298 = 0; _i1298 < _size1294; ++_i1298) { - std::string _key1257; - xfer += iprot->readString(_key1257); - Materialization& _val1258 = 
this->success[_key1257]; - xfer += _val1258.read(iprot); + std::string _key1299; + xfer += iprot->readString(_key1299); + Materialization& _val1300 = this->success[_key1299]; + xfer += _val1300.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9180,11 +9180,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1259; - for (_iter1259 = this->success.begin(); _iter1259 != this->success.end(); ++_iter1259) + std::map ::const_iterator _iter1301; + for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) { - xfer += oprot->writeString(_iter1259->first); - xfer += _iter1259->second.write(oprot); + xfer += oprot->writeString(_iter1301->first); + xfer += _iter1301->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -9237,17 +9237,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1260; - ::apache::thrift::protocol::TType _ktype1261; - ::apache::thrift::protocol::TType _vtype1262; - xfer += iprot->readMapBegin(_ktype1261, _vtype1262, _size1260); - uint32_t _i1264; - for (_i1264 = 0; _i1264 < _size1260; ++_i1264) + uint32_t _size1302; + ::apache::thrift::protocol::TType _ktype1303; + ::apache::thrift::protocol::TType _vtype1304; + xfer += iprot->readMapBegin(_ktype1303, _vtype1304, _size1302); + uint32_t _i1306; + for (_i1306 = 0; _i1306 < _size1302; ++_i1306) { - std::string _key1265; - xfer += iprot->readString(_key1265); - Materialization& _val1266 = (*(this->success))[_key1265]; - xfer += _val1266.read(iprot); + std::string _key1307; + xfer += iprot->readString(_key1307); + Materialization& _val1308 = 
(*(this->success))[_key1307]; + xfer += _val1308.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9433,14 +9433,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1267; - ::apache::thrift::protocol::TType _etype1270; - xfer += iprot->readListBegin(_etype1270, _size1267); - this->success.resize(_size1267); - uint32_t _i1271; - for (_i1271 = 0; _i1271 < _size1267; ++_i1271) + uint32_t _size1309; + ::apache::thrift::protocol::TType _etype1312; + xfer += iprot->readListBegin(_etype1312, _size1309); + this->success.resize(_size1309); + uint32_t _i1313; + for (_i1313 = 0; _i1313 < _size1309; ++_i1313) { - xfer += iprot->readString(this->success[_i1271]); + xfer += iprot->readString(this->success[_i1313]); } xfer += iprot->readListEnd(); } @@ -9495,10 +9495,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1272; - for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272) + std::vector ::const_iterator _iter1314; + for (_iter1314 = this->success.begin(); _iter1314 != this->success.end(); ++_iter1314) { - xfer += oprot->writeString((*_iter1272)); + xfer += oprot->writeString((*_iter1314)); } xfer += oprot->writeListEnd(); } @@ -9551,14 +9551,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1273; - ::apache::thrift::protocol::TType _etype1276; - xfer += iprot->readListBegin(_etype1276, _size1273); - (*(this->success)).resize(_size1273); - uint32_t _i1277; - for (_i1277 = 0; _i1277 < _size1273; ++_i1277) + uint32_t _size1315; + 
::apache::thrift::protocol::TType _etype1318; + xfer += iprot->readListBegin(_etype1318, _size1315); + (*(this->success)).resize(_size1315); + uint32_t _i1319; + for (_i1319 = 0; _i1319 < _size1315; ++_i1319) { - xfer += iprot->readString((*(this->success))[_i1277]); + xfer += iprot->readString((*(this->success))[_i1319]); } xfer += iprot->readListEnd(); } @@ -10892,14 +10892,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1278; - ::apache::thrift::protocol::TType _etype1281; - xfer += iprot->readListBegin(_etype1281, _size1278); - this->new_parts.resize(_size1278); - uint32_t _i1282; - for (_i1282 = 0; _i1282 < _size1278; ++_i1282) + uint32_t _size1320; + ::apache::thrift::protocol::TType _etype1323; + xfer += iprot->readListBegin(_etype1323, _size1320); + this->new_parts.resize(_size1320); + uint32_t _i1324; + for (_i1324 = 0; _i1324 < _size1320; ++_i1324) { - xfer += this->new_parts[_i1282].read(iprot); + xfer += this->new_parts[_i1324].read(iprot); } xfer += iprot->readListEnd(); } @@ -10928,10 +10928,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1283; - for (_iter1283 = this->new_parts.begin(); _iter1283 != this->new_parts.end(); ++_iter1283) + std::vector ::const_iterator _iter1325; + for (_iter1325 = this->new_parts.begin(); _iter1325 != this->new_parts.end(); ++_iter1325) { - xfer += (*_iter1283).write(oprot); + xfer += (*_iter1325).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10955,10 +10955,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", 
::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1284; - for (_iter1284 = (*(this->new_parts)).begin(); _iter1284 != (*(this->new_parts)).end(); ++_iter1284) + std::vector ::const_iterator _iter1326; + for (_iter1326 = (*(this->new_parts)).begin(); _iter1326 != (*(this->new_parts)).end(); ++_iter1326) { - xfer += (*_iter1284).write(oprot); + xfer += (*_iter1326).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11167,14 +11167,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->new_parts.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1327; + ::apache::thrift::protocol::TType _etype1330; + xfer += iprot->readListBegin(_etype1330, _size1327); + this->new_parts.resize(_size1327); + uint32_t _i1331; + for (_i1331 = 0; _i1331 < _size1327; ++_i1331) { - xfer += this->new_parts[_i1289].read(iprot); + xfer += this->new_parts[_i1331].read(iprot); } xfer += iprot->readListEnd(); } @@ -11203,10 +11203,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1290; - for (_iter1290 = this->new_parts.begin(); _iter1290 != this->new_parts.end(); ++_iter1290) + std::vector ::const_iterator _iter1332; + for (_iter1332 = this->new_parts.begin(); _iter1332 != this->new_parts.end(); ++_iter1332) { - xfer += (*_iter1290).write(oprot); + xfer += (*_iter1332).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -11230,10 +11230,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1291; - for (_iter1291 = (*(this->new_parts)).begin(); _iter1291 != (*(this->new_parts)).end(); ++_iter1291) + std::vector ::const_iterator _iter1333; + for (_iter1333 = (*(this->new_parts)).begin(); _iter1333 != (*(this->new_parts)).end(); ++_iter1333) { - xfer += (*_iter1291).write(oprot); + xfer += (*_iter1333).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11458,14 +11458,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1292; - ::apache::thrift::protocol::TType _etype1295; - xfer += iprot->readListBegin(_etype1295, _size1292); - this->part_vals.resize(_size1292); - uint32_t _i1296; - for (_i1296 = 0; _i1296 < _size1292; ++_i1296) + uint32_t _size1334; + ::apache::thrift::protocol::TType _etype1337; + xfer += iprot->readListBegin(_etype1337, _size1334); + this->part_vals.resize(_size1334); + uint32_t _i1338; + for (_i1338 = 0; _i1338 < _size1334; ++_i1338) { - xfer += iprot->readString(this->part_vals[_i1296]); + xfer += iprot->readString(this->part_vals[_i1338]); } xfer += iprot->readListEnd(); } @@ -11502,10 +11502,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1297; - for (_iter1297 = this->part_vals.begin(); _iter1297 != this->part_vals.end(); ++_iter1297) + std::vector ::const_iterator _iter1339; + 
for (_iter1339 = this->part_vals.begin(); _iter1339 != this->part_vals.end(); ++_iter1339) { - xfer += oprot->writeString((*_iter1297)); + xfer += oprot->writeString((*_iter1339)); } xfer += oprot->writeListEnd(); } @@ -11537,10 +11537,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1298; - for (_iter1298 = (*(this->part_vals)).begin(); _iter1298 != (*(this->part_vals)).end(); ++_iter1298) + std::vector ::const_iterator _iter1340; + for (_iter1340 = (*(this->part_vals)).begin(); _iter1340 != (*(this->part_vals)).end(); ++_iter1340) { - xfer += oprot->writeString((*_iter1298)); + xfer += oprot->writeString((*_iter1340)); } xfer += oprot->writeListEnd(); } @@ -12012,14 +12012,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1299; - ::apache::thrift::protocol::TType _etype1302; - xfer += iprot->readListBegin(_etype1302, _size1299); - this->part_vals.resize(_size1299); - uint32_t _i1303; - for (_i1303 = 0; _i1303 < _size1299; ++_i1303) + uint32_t _size1341; + ::apache::thrift::protocol::TType _etype1344; + xfer += iprot->readListBegin(_etype1344, _size1341); + this->part_vals.resize(_size1341); + uint32_t _i1345; + for (_i1345 = 0; _i1345 < _size1341; ++_i1345) { - xfer += iprot->readString(this->part_vals[_i1303]); + xfer += iprot->readString(this->part_vals[_i1345]); } xfer += iprot->readListEnd(); } @@ -12064,10 +12064,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1304; - for (_iter1304 = this->part_vals.begin(); _iter1304 != this->part_vals.end(); ++_iter1304) + std::vector ::const_iterator _iter1346; + for (_iter1346 = this->part_vals.begin(); _iter1346 != this->part_vals.end(); ++_iter1346) { - xfer += oprot->writeString((*_iter1304)); + xfer += oprot->writeString((*_iter1346)); } xfer += oprot->writeListEnd(); } @@ -12103,10 +12103,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1305; - for (_iter1305 = (*(this->part_vals)).begin(); _iter1305 != (*(this->part_vals)).end(); ++_iter1305) + std::vector ::const_iterator _iter1347; + for (_iter1347 = (*(this->part_vals)).begin(); _iter1347 != (*(this->part_vals)).end(); ++_iter1347) { - xfer += oprot->writeString((*_iter1305)); + xfer += oprot->writeString((*_iter1347)); } xfer += oprot->writeListEnd(); } @@ -12909,14 +12909,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1306; - ::apache::thrift::protocol::TType _etype1309; - xfer += iprot->readListBegin(_etype1309, _size1306); - this->part_vals.resize(_size1306); - uint32_t _i1310; - for (_i1310 = 0; _i1310 < _size1306; ++_i1310) + uint32_t _size1348; + ::apache::thrift::protocol::TType _etype1351; + xfer += iprot->readListBegin(_etype1351, _size1348); + this->part_vals.resize(_size1348); + uint32_t _i1352; + for (_i1352 = 0; _i1352 < _size1348; ++_i1352) { - xfer += iprot->readString(this->part_vals[_i1310]); + xfer += iprot->readString(this->part_vals[_i1352]); } xfer += iprot->readListEnd(); } @@ -12961,10 +12961,10 @@ uint32_t 
ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1311; - for (_iter1311 = this->part_vals.begin(); _iter1311 != this->part_vals.end(); ++_iter1311) + std::vector ::const_iterator _iter1353; + for (_iter1353 = this->part_vals.begin(); _iter1353 != this->part_vals.end(); ++_iter1353) { - xfer += oprot->writeString((*_iter1311)); + xfer += oprot->writeString((*_iter1353)); } xfer += oprot->writeListEnd(); } @@ -13000,10 +13000,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1312; - for (_iter1312 = (*(this->part_vals)).begin(); _iter1312 != (*(this->part_vals)).end(); ++_iter1312) + std::vector ::const_iterator _iter1354; + for (_iter1354 = (*(this->part_vals)).begin(); _iter1354 != (*(this->part_vals)).end(); ++_iter1354) { - xfer += oprot->writeString((*_iter1312)); + xfer += oprot->writeString((*_iter1354)); } xfer += oprot->writeListEnd(); } @@ -13212,14 +13212,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1313; - ::apache::thrift::protocol::TType _etype1316; - xfer += iprot->readListBegin(_etype1316, _size1313); - this->part_vals.resize(_size1313); - uint32_t _i1317; - for (_i1317 = 0; _i1317 < _size1313; ++_i1317) + uint32_t _size1355; + ::apache::thrift::protocol::TType _etype1358; + xfer += iprot->readListBegin(_etype1358, _size1355); + this->part_vals.resize(_size1355); + uint32_t _i1359; + for (_i1359 = 0; _i1359 
< _size1355; ++_i1359) { - xfer += iprot->readString(this->part_vals[_i1317]); + xfer += iprot->readString(this->part_vals[_i1359]); } xfer += iprot->readListEnd(); } @@ -13272,10 +13272,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1318; - for (_iter1318 = this->part_vals.begin(); _iter1318 != this->part_vals.end(); ++_iter1318) + std::vector ::const_iterator _iter1360; + for (_iter1360 = this->part_vals.begin(); _iter1360 != this->part_vals.end(); ++_iter1360) { - xfer += oprot->writeString((*_iter1318)); + xfer += oprot->writeString((*_iter1360)); } xfer += oprot->writeListEnd(); } @@ -13315,10 +13315,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1319; - for (_iter1319 = (*(this->part_vals)).begin(); _iter1319 != (*(this->part_vals)).end(); ++_iter1319) + std::vector ::const_iterator _iter1361; + for (_iter1361 = (*(this->part_vals)).begin(); _iter1361 != (*(this->part_vals)).end(); ++_iter1361) { - xfer += oprot->writeString((*_iter1319)); + xfer += oprot->writeString((*_iter1361)); } xfer += oprot->writeListEnd(); } @@ -14324,14 +14324,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1320; - ::apache::thrift::protocol::TType _etype1323; - xfer += iprot->readListBegin(_etype1323, _size1320); - this->part_vals.resize(_size1320); - uint32_t _i1324; - for (_i1324 = 0; _i1324 < _size1320; ++_i1324) + 
uint32_t _size1362; + ::apache::thrift::protocol::TType _etype1365; + xfer += iprot->readListBegin(_etype1365, _size1362); + this->part_vals.resize(_size1362); + uint32_t _i1366; + for (_i1366 = 0; _i1366 < _size1362; ++_i1366) { - xfer += iprot->readString(this->part_vals[_i1324]); + xfer += iprot->readString(this->part_vals[_i1366]); } xfer += iprot->readListEnd(); } @@ -14368,10 +14368,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1325; - for (_iter1325 = this->part_vals.begin(); _iter1325 != this->part_vals.end(); ++_iter1325) + std::vector ::const_iterator _iter1367; + for (_iter1367 = this->part_vals.begin(); _iter1367 != this->part_vals.end(); ++_iter1367) { - xfer += oprot->writeString((*_iter1325)); + xfer += oprot->writeString((*_iter1367)); } xfer += oprot->writeListEnd(); } @@ -14403,10 +14403,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1326; - for (_iter1326 = (*(this->part_vals)).begin(); _iter1326 != (*(this->part_vals)).end(); ++_iter1326) + std::vector ::const_iterator _iter1368; + for (_iter1368 = (*(this->part_vals)).begin(); _iter1368 != (*(this->part_vals)).end(); ++_iter1368) { - xfer += oprot->writeString((*_iter1326)); + xfer += oprot->writeString((*_iter1368)); } xfer += oprot->writeListEnd(); } @@ -14595,17 +14595,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1327; - 
::apache::thrift::protocol::TType _ktype1328; - ::apache::thrift::protocol::TType _vtype1329; - xfer += iprot->readMapBegin(_ktype1328, _vtype1329, _size1327); - uint32_t _i1331; - for (_i1331 = 0; _i1331 < _size1327; ++_i1331) + uint32_t _size1369; + ::apache::thrift::protocol::TType _ktype1370; + ::apache::thrift::protocol::TType _vtype1371; + xfer += iprot->readMapBegin(_ktype1370, _vtype1371, _size1369); + uint32_t _i1373; + for (_i1373 = 0; _i1373 < _size1369; ++_i1373) { - std::string _key1332; - xfer += iprot->readString(_key1332); - std::string& _val1333 = this->partitionSpecs[_key1332]; - xfer += iprot->readString(_val1333); + std::string _key1374; + xfer += iprot->readString(_key1374); + std::string& _val1375 = this->partitionSpecs[_key1374]; + xfer += iprot->readString(_val1375); } xfer += iprot->readMapEnd(); } @@ -14666,11 +14666,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1334; - for (_iter1334 = this->partitionSpecs.begin(); _iter1334 != this->partitionSpecs.end(); ++_iter1334) + std::map ::const_iterator _iter1376; + for (_iter1376 = this->partitionSpecs.begin(); _iter1376 != this->partitionSpecs.end(); ++_iter1376) { - xfer += oprot->writeString(_iter1334->first); - xfer += oprot->writeString(_iter1334->second); + xfer += oprot->writeString(_iter1376->first); + xfer += oprot->writeString(_iter1376->second); } xfer += oprot->writeMapEnd(); } @@ -14710,11 +14710,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, 
static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1335; - for (_iter1335 = (*(this->partitionSpecs)).begin(); _iter1335 != (*(this->partitionSpecs)).end(); ++_iter1335) + std::map ::const_iterator _iter1377; + for (_iter1377 = (*(this->partitionSpecs)).begin(); _iter1377 != (*(this->partitionSpecs)).end(); ++_iter1377) { - xfer += oprot->writeString(_iter1335->first); - xfer += oprot->writeString(_iter1335->second); + xfer += oprot->writeString(_iter1377->first); + xfer += oprot->writeString(_iter1377->second); } xfer += oprot->writeMapEnd(); } @@ -14959,17 +14959,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1336; - ::apache::thrift::protocol::TType _ktype1337; - ::apache::thrift::protocol::TType _vtype1338; - xfer += iprot->readMapBegin(_ktype1337, _vtype1338, _size1336); - uint32_t _i1340; - for (_i1340 = 0; _i1340 < _size1336; ++_i1340) + uint32_t _size1378; + ::apache::thrift::protocol::TType _ktype1379; + ::apache::thrift::protocol::TType _vtype1380; + xfer += iprot->readMapBegin(_ktype1379, _vtype1380, _size1378); + uint32_t _i1382; + for (_i1382 = 0; _i1382 < _size1378; ++_i1382) { - std::string _key1341; - xfer += iprot->readString(_key1341); - std::string& _val1342 = this->partitionSpecs[_key1341]; - xfer += iprot->readString(_val1342); + std::string _key1383; + xfer += iprot->readString(_key1383); + std::string& _val1384 = this->partitionSpecs[_key1383]; + xfer += iprot->readString(_val1384); } xfer += iprot->readMapEnd(); } @@ -15030,11 +15030,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map 
::const_iterator _iter1343; - for (_iter1343 = this->partitionSpecs.begin(); _iter1343 != this->partitionSpecs.end(); ++_iter1343) + std::map ::const_iterator _iter1385; + for (_iter1385 = this->partitionSpecs.begin(); _iter1385 != this->partitionSpecs.end(); ++_iter1385) { - xfer += oprot->writeString(_iter1343->first); - xfer += oprot->writeString(_iter1343->second); + xfer += oprot->writeString(_iter1385->first); + xfer += oprot->writeString(_iter1385->second); } xfer += oprot->writeMapEnd(); } @@ -15074,11 +15074,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1344; - for (_iter1344 = (*(this->partitionSpecs)).begin(); _iter1344 != (*(this->partitionSpecs)).end(); ++_iter1344) + std::map ::const_iterator _iter1386; + for (_iter1386 = (*(this->partitionSpecs)).begin(); _iter1386 != (*(this->partitionSpecs)).end(); ++_iter1386) { - xfer += oprot->writeString(_iter1344->first); - xfer += oprot->writeString(_iter1344->second); + xfer += oprot->writeString(_iter1386->first); + xfer += oprot->writeString(_iter1386->second); } xfer += oprot->writeMapEnd(); } @@ -15135,14 +15135,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1345; - ::apache::thrift::protocol::TType _etype1348; - xfer += iprot->readListBegin(_etype1348, _size1345); - this->success.resize(_size1345); - uint32_t _i1349; - for (_i1349 = 0; _i1349 < _size1345; ++_i1349) + uint32_t _size1387; + ::apache::thrift::protocol::TType _etype1390; + xfer += iprot->readListBegin(_etype1390, _size1387); + this->success.resize(_size1387); + uint32_t _i1391; + for (_i1391 = 0; 
_i1391 < _size1387; ++_i1391) { - xfer += this->success[_i1349].read(iprot); + xfer += this->success[_i1391].read(iprot); } xfer += iprot->readListEnd(); } @@ -15205,10 +15205,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1350; - for (_iter1350 = this->success.begin(); _iter1350 != this->success.end(); ++_iter1350) + std::vector ::const_iterator _iter1392; + for (_iter1392 = this->success.begin(); _iter1392 != this->success.end(); ++_iter1392) { - xfer += (*_iter1350).write(oprot); + xfer += (*_iter1392).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15265,14 +15265,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1351; - ::apache::thrift::protocol::TType _etype1354; - xfer += iprot->readListBegin(_etype1354, _size1351); - (*(this->success)).resize(_size1351); - uint32_t _i1355; - for (_i1355 = 0; _i1355 < _size1351; ++_i1355) + uint32_t _size1393; + ::apache::thrift::protocol::TType _etype1396; + xfer += iprot->readListBegin(_etype1396, _size1393); + (*(this->success)).resize(_size1393); + uint32_t _i1397; + for (_i1397 = 0; _i1397 < _size1393; ++_i1397) { - xfer += (*(this->success))[_i1355].read(iprot); + xfer += (*(this->success))[_i1397].read(iprot); } xfer += iprot->readListEnd(); } @@ -15371,14 +15371,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1356; - ::apache::thrift::protocol::TType _etype1359; - xfer += iprot->readListBegin(_etype1359, _size1356); - this->part_vals.resize(_size1356); - uint32_t _i1360; - for (_i1360 = 0; 
_i1360 < _size1356; ++_i1360) + uint32_t _size1398; + ::apache::thrift::protocol::TType _etype1401; + xfer += iprot->readListBegin(_etype1401, _size1398); + this->part_vals.resize(_size1398); + uint32_t _i1402; + for (_i1402 = 0; _i1402 < _size1398; ++_i1402) { - xfer += iprot->readString(this->part_vals[_i1360]); + xfer += iprot->readString(this->part_vals[_i1402]); } xfer += iprot->readListEnd(); } @@ -15399,14 +15399,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1361; - ::apache::thrift::protocol::TType _etype1364; - xfer += iprot->readListBegin(_etype1364, _size1361); - this->group_names.resize(_size1361); - uint32_t _i1365; - for (_i1365 = 0; _i1365 < _size1361; ++_i1365) + uint32_t _size1403; + ::apache::thrift::protocol::TType _etype1406; + xfer += iprot->readListBegin(_etype1406, _size1403); + this->group_names.resize(_size1403); + uint32_t _i1407; + for (_i1407 = 0; _i1407 < _size1403; ++_i1407) { - xfer += iprot->readString(this->group_names[_i1365]); + xfer += iprot->readString(this->group_names[_i1407]); } xfer += iprot->readListEnd(); } @@ -15443,10 +15443,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1366; - for (_iter1366 = this->part_vals.begin(); _iter1366 != this->part_vals.end(); ++_iter1366) + std::vector ::const_iterator _iter1408; + for (_iter1408 = this->part_vals.begin(); _iter1408 != this->part_vals.end(); ++_iter1408) { - xfer += oprot->writeString((*_iter1366)); + xfer += oprot->writeString((*_iter1408)); } xfer += oprot->writeListEnd(); } @@ -15459,10 +15459,10 @@ uint32_t 
ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1367; - for (_iter1367 = this->group_names.begin(); _iter1367 != this->group_names.end(); ++_iter1367) + std::vector ::const_iterator _iter1409; + for (_iter1409 = this->group_names.begin(); _iter1409 != this->group_names.end(); ++_iter1409) { - xfer += oprot->writeString((*_iter1367)); + xfer += oprot->writeString((*_iter1409)); } xfer += oprot->writeListEnd(); } @@ -15494,10 +15494,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1368; - for (_iter1368 = (*(this->part_vals)).begin(); _iter1368 != (*(this->part_vals)).end(); ++_iter1368) + std::vector ::const_iterator _iter1410; + for (_iter1410 = (*(this->part_vals)).begin(); _iter1410 != (*(this->part_vals)).end(); ++_iter1410) { - xfer += oprot->writeString((*_iter1368)); + xfer += oprot->writeString((*_iter1410)); } xfer += oprot->writeListEnd(); } @@ -15510,10 +15510,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1369; - for (_iter1369 = (*(this->group_names)).begin(); _iter1369 != (*(this->group_names)).end(); ++_iter1369) + std::vector ::const_iterator _iter1411; + for (_iter1411 = (*(this->group_names)).begin(); _iter1411 != (*(this->group_names)).end(); ++_iter1411) { - 
xfer += oprot->writeString((*_iter1369)); + xfer += oprot->writeString((*_iter1411)); } xfer += oprot->writeListEnd(); } @@ -16072,14 +16072,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1370; - ::apache::thrift::protocol::TType _etype1373; - xfer += iprot->readListBegin(_etype1373, _size1370); - this->success.resize(_size1370); - uint32_t _i1374; - for (_i1374 = 0; _i1374 < _size1370; ++_i1374) + uint32_t _size1412; + ::apache::thrift::protocol::TType _etype1415; + xfer += iprot->readListBegin(_etype1415, _size1412); + this->success.resize(_size1412); + uint32_t _i1416; + for (_i1416 = 0; _i1416 < _size1412; ++_i1416) { - xfer += this->success[_i1374].read(iprot); + xfer += this->success[_i1416].read(iprot); } xfer += iprot->readListEnd(); } @@ -16126,10 +16126,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1375; - for (_iter1375 = this->success.begin(); _iter1375 != this->success.end(); ++_iter1375) + std::vector ::const_iterator _iter1417; + for (_iter1417 = this->success.begin(); _iter1417 != this->success.end(); ++_iter1417) { - xfer += (*_iter1375).write(oprot); + xfer += (*_iter1417).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16178,14 +16178,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1376; - ::apache::thrift::protocol::TType _etype1379; - xfer += iprot->readListBegin(_etype1379, _size1376); - (*(this->success)).resize(_size1376); - uint32_t _i1380; - for (_i1380 = 0; _i1380 < _size1376; ++_i1380) + uint32_t _size1418; + 
::apache::thrift::protocol::TType _etype1421; + xfer += iprot->readListBegin(_etype1421, _size1418); + (*(this->success)).resize(_size1418); + uint32_t _i1422; + for (_i1422 = 0; _i1422 < _size1418; ++_i1422) { - xfer += (*(this->success))[_i1380].read(iprot); + xfer += (*(this->success))[_i1422].read(iprot); } xfer += iprot->readListEnd(); } @@ -16284,14 +16284,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1381; - ::apache::thrift::protocol::TType _etype1384; - xfer += iprot->readListBegin(_etype1384, _size1381); - this->group_names.resize(_size1381); - uint32_t _i1385; - for (_i1385 = 0; _i1385 < _size1381; ++_i1385) + uint32_t _size1423; + ::apache::thrift::protocol::TType _etype1426; + xfer += iprot->readListBegin(_etype1426, _size1423); + this->group_names.resize(_size1423); + uint32_t _i1427; + for (_i1427 = 0; _i1427 < _size1423; ++_i1427) { - xfer += iprot->readString(this->group_names[_i1385]); + xfer += iprot->readString(this->group_names[_i1427]); } xfer += iprot->readListEnd(); } @@ -16336,10 +16336,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1386; - for (_iter1386 = this->group_names.begin(); _iter1386 != this->group_names.end(); ++_iter1386) + std::vector ::const_iterator _iter1428; + for (_iter1428 = this->group_names.begin(); _iter1428 != this->group_names.end(); ++_iter1428) { - xfer += oprot->writeString((*_iter1386)); + xfer += oprot->writeString((*_iter1428)); } xfer += oprot->writeListEnd(); } @@ -16379,10 +16379,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += 
oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1387; - for (_iter1387 = (*(this->group_names)).begin(); _iter1387 != (*(this->group_names)).end(); ++_iter1387) + std::vector ::const_iterator _iter1429; + for (_iter1429 = (*(this->group_names)).begin(); _iter1429 != (*(this->group_names)).end(); ++_iter1429) { - xfer += oprot->writeString((*_iter1387)); + xfer += oprot->writeString((*_iter1429)); } xfer += oprot->writeListEnd(); } @@ -16423,14 +16423,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1388; - ::apache::thrift::protocol::TType _etype1391; - xfer += iprot->readListBegin(_etype1391, _size1388); - this->success.resize(_size1388); - uint32_t _i1392; - for (_i1392 = 0; _i1392 < _size1388; ++_i1392) + uint32_t _size1430; + ::apache::thrift::protocol::TType _etype1433; + xfer += iprot->readListBegin(_etype1433, _size1430); + this->success.resize(_size1430); + uint32_t _i1434; + for (_i1434 = 0; _i1434 < _size1430; ++_i1434) { - xfer += this->success[_i1392].read(iprot); + xfer += this->success[_i1434].read(iprot); } xfer += iprot->readListEnd(); } @@ -16477,10 +16477,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1393; - for (_iter1393 = this->success.begin(); _iter1393 != this->success.end(); ++_iter1393) + std::vector ::const_iterator _iter1435; + for (_iter1435 = this->success.begin(); _iter1435 != this->success.end(); ++_iter1435) { - xfer += (*_iter1393).write(oprot); + xfer += 
(*_iter1435).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16529,14 +16529,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1394; - ::apache::thrift::protocol::TType _etype1397; - xfer += iprot->readListBegin(_etype1397, _size1394); - (*(this->success)).resize(_size1394); - uint32_t _i1398; - for (_i1398 = 0; _i1398 < _size1394; ++_i1398) + uint32_t _size1436; + ::apache::thrift::protocol::TType _etype1439; + xfer += iprot->readListBegin(_etype1439, _size1436); + (*(this->success)).resize(_size1436); + uint32_t _i1440; + for (_i1440 = 0; _i1440 < _size1436; ++_i1440) { - xfer += (*(this->success))[_i1398].read(iprot); + xfer += (*(this->success))[_i1440].read(iprot); } xfer += iprot->readListEnd(); } @@ -16714,14 +16714,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1399; - ::apache::thrift::protocol::TType _etype1402; - xfer += iprot->readListBegin(_etype1402, _size1399); - this->success.resize(_size1399); - uint32_t _i1403; - for (_i1403 = 0; _i1403 < _size1399; ++_i1403) + uint32_t _size1441; + ::apache::thrift::protocol::TType _etype1444; + xfer += iprot->readListBegin(_etype1444, _size1441); + this->success.resize(_size1441); + uint32_t _i1445; + for (_i1445 = 0; _i1445 < _size1441; ++_i1445) { - xfer += this->success[_i1403].read(iprot); + xfer += this->success[_i1445].read(iprot); } xfer += iprot->readListEnd(); } @@ -16768,10 +16768,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1404; - for (_iter1404 = this->success.begin(); 
_iter1404 != this->success.end(); ++_iter1404) + std::vector ::const_iterator _iter1446; + for (_iter1446 = this->success.begin(); _iter1446 != this->success.end(); ++_iter1446) { - xfer += (*_iter1404).write(oprot); + xfer += (*_iter1446).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16820,14 +16820,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1405; - ::apache::thrift::protocol::TType _etype1408; - xfer += iprot->readListBegin(_etype1408, _size1405); - (*(this->success)).resize(_size1405); - uint32_t _i1409; - for (_i1409 = 0; _i1409 < _size1405; ++_i1409) + uint32_t _size1447; + ::apache::thrift::protocol::TType _etype1450; + xfer += iprot->readListBegin(_etype1450, _size1447); + (*(this->success)).resize(_size1447); + uint32_t _i1451; + for (_i1451 = 0; _i1451 < _size1447; ++_i1451) { - xfer += (*(this->success))[_i1409].read(iprot); + xfer += (*(this->success))[_i1451].read(iprot); } xfer += iprot->readListEnd(); } @@ -17005,14 +17005,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1410; - ::apache::thrift::protocol::TType _etype1413; - xfer += iprot->readListBegin(_etype1413, _size1410); - this->success.resize(_size1410); - uint32_t _i1414; - for (_i1414 = 0; _i1414 < _size1410; ++_i1414) + uint32_t _size1452; + ::apache::thrift::protocol::TType _etype1455; + xfer += iprot->readListBegin(_etype1455, _size1452); + this->success.resize(_size1452); + uint32_t _i1456; + for (_i1456 = 0; _i1456 < _size1452; ++_i1456) { - xfer += iprot->readString(this->success[_i1414]); + xfer += iprot->readString(this->success[_i1456]); } xfer += iprot->readListEnd(); } @@ -17059,10 +17059,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1415; - for (_iter1415 = this->success.begin(); _iter1415 != this->success.end(); ++_iter1415) + std::vector ::const_iterator _iter1457; + for (_iter1457 = this->success.begin(); _iter1457 != this->success.end(); ++_iter1457) { - xfer += oprot->writeString((*_iter1415)); + xfer += oprot->writeString((*_iter1457)); } xfer += oprot->writeListEnd(); } @@ -17111,14 +17111,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1416; - ::apache::thrift::protocol::TType _etype1419; - xfer += iprot->readListBegin(_etype1419, _size1416); - (*(this->success)).resize(_size1416); - uint32_t _i1420; - for (_i1420 = 0; _i1420 < _size1416; ++_i1420) + uint32_t _size1458; + ::apache::thrift::protocol::TType _etype1461; + xfer += iprot->readListBegin(_etype1461, _size1458); + (*(this->success)).resize(_size1458); + uint32_t _i1462; + for (_i1462 = 0; _i1462 < _size1458; ++_i1462) { - xfer += iprot->readString((*(this->success))[_i1420]); + xfer += iprot->readString((*(this->success))[_i1462]); } xfer += iprot->readListEnd(); } @@ -17428,14 +17428,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1421; - ::apache::thrift::protocol::TType _etype1424; - xfer += iprot->readListBegin(_etype1424, _size1421); - this->part_vals.resize(_size1421); - uint32_t _i1425; - for (_i1425 = 0; _i1425 < _size1421; ++_i1425) + uint32_t _size1463; + ::apache::thrift::protocol::TType _etype1466; + xfer += iprot->readListBegin(_etype1466, _size1463); + this->part_vals.resize(_size1463); + uint32_t _i1467; + for (_i1467 = 0; _i1467 < 
_size1463; ++_i1467) { - xfer += iprot->readString(this->part_vals[_i1425]); + xfer += iprot->readString(this->part_vals[_i1467]); } xfer += iprot->readListEnd(); } @@ -17480,10 +17480,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1426; - for (_iter1426 = this->part_vals.begin(); _iter1426 != this->part_vals.end(); ++_iter1426) + std::vector ::const_iterator _iter1468; + for (_iter1468 = this->part_vals.begin(); _iter1468 != this->part_vals.end(); ++_iter1468) { - xfer += oprot->writeString((*_iter1426)); + xfer += oprot->writeString((*_iter1468)); } xfer += oprot->writeListEnd(); } @@ -17519,10 +17519,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1427; - for (_iter1427 = (*(this->part_vals)).begin(); _iter1427 != (*(this->part_vals)).end(); ++_iter1427) + std::vector ::const_iterator _iter1469; + for (_iter1469 = (*(this->part_vals)).begin(); _iter1469 != (*(this->part_vals)).end(); ++_iter1469) { - xfer += oprot->writeString((*_iter1427)); + xfer += oprot->writeString((*_iter1469)); } xfer += oprot->writeListEnd(); } @@ -17567,14 +17567,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1428; - ::apache::thrift::protocol::TType _etype1431; - xfer += iprot->readListBegin(_etype1431, _size1428); - this->success.resize(_size1428); - uint32_t _i1432; - for (_i1432 = 0; _i1432 < _size1428; ++_i1432) + uint32_t 
_size1470; + ::apache::thrift::protocol::TType _etype1473; + xfer += iprot->readListBegin(_etype1473, _size1470); + this->success.resize(_size1470); + uint32_t _i1474; + for (_i1474 = 0; _i1474 < _size1470; ++_i1474) { - xfer += this->success[_i1432].read(iprot); + xfer += this->success[_i1474].read(iprot); } xfer += iprot->readListEnd(); } @@ -17621,10 +17621,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1433; - for (_iter1433 = this->success.begin(); _iter1433 != this->success.end(); ++_iter1433) + std::vector ::const_iterator _iter1475; + for (_iter1475 = this->success.begin(); _iter1475 != this->success.end(); ++_iter1475) { - xfer += (*_iter1433).write(oprot); + xfer += (*_iter1475).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17673,14 +17673,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1434; - ::apache::thrift::protocol::TType _etype1437; - xfer += iprot->readListBegin(_etype1437, _size1434); - (*(this->success)).resize(_size1434); - uint32_t _i1438; - for (_i1438 = 0; _i1438 < _size1434; ++_i1438) + uint32_t _size1476; + ::apache::thrift::protocol::TType _etype1479; + xfer += iprot->readListBegin(_etype1479, _size1476); + (*(this->success)).resize(_size1476); + uint32_t _i1480; + for (_i1480 = 0; _i1480 < _size1476; ++_i1480) { - xfer += (*(this->success))[_i1438].read(iprot); + xfer += (*(this->success))[_i1480].read(iprot); } xfer += iprot->readListEnd(); } @@ -17763,14 +17763,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t 
_size1439; - ::apache::thrift::protocol::TType _etype1442; - xfer += iprot->readListBegin(_etype1442, _size1439); - this->part_vals.resize(_size1439); - uint32_t _i1443; - for (_i1443 = 0; _i1443 < _size1439; ++_i1443) + uint32_t _size1481; + ::apache::thrift::protocol::TType _etype1484; + xfer += iprot->readListBegin(_etype1484, _size1481); + this->part_vals.resize(_size1481); + uint32_t _i1485; + for (_i1485 = 0; _i1485 < _size1481; ++_i1485) { - xfer += iprot->readString(this->part_vals[_i1443]); + xfer += iprot->readString(this->part_vals[_i1485]); } xfer += iprot->readListEnd(); } @@ -17799,14 +17799,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1444; - ::apache::thrift::protocol::TType _etype1447; - xfer += iprot->readListBegin(_etype1447, _size1444); - this->group_names.resize(_size1444); - uint32_t _i1448; - for (_i1448 = 0; _i1448 < _size1444; ++_i1448) + uint32_t _size1486; + ::apache::thrift::protocol::TType _etype1489; + xfer += iprot->readListBegin(_etype1489, _size1486); + this->group_names.resize(_size1486); + uint32_t _i1490; + for (_i1490 = 0; _i1490 < _size1486; ++_i1490) { - xfer += iprot->readString(this->group_names[_i1448]); + xfer += iprot->readString(this->group_names[_i1490]); } xfer += iprot->readListEnd(); } @@ -17843,10 +17843,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1449; - for (_iter1449 = this->part_vals.begin(); _iter1449 != this->part_vals.end(); ++_iter1449) + std::vector ::const_iterator _iter1491; + for (_iter1491 = this->part_vals.begin(); _iter1491 != this->part_vals.end(); ++_iter1491) { - xfer += 
oprot->writeString((*_iter1449)); + xfer += oprot->writeString((*_iter1491)); } xfer += oprot->writeListEnd(); } @@ -17863,10 +17863,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1450; - for (_iter1450 = this->group_names.begin(); _iter1450 != this->group_names.end(); ++_iter1450) + std::vector ::const_iterator _iter1492; + for (_iter1492 = this->group_names.begin(); _iter1492 != this->group_names.end(); ++_iter1492) { - xfer += oprot->writeString((*_iter1450)); + xfer += oprot->writeString((*_iter1492)); } xfer += oprot->writeListEnd(); } @@ -17898,10 +17898,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1451; - for (_iter1451 = (*(this->part_vals)).begin(); _iter1451 != (*(this->part_vals)).end(); ++_iter1451) + std::vector ::const_iterator _iter1493; + for (_iter1493 = (*(this->part_vals)).begin(); _iter1493 != (*(this->part_vals)).end(); ++_iter1493) { - xfer += oprot->writeString((*_iter1451)); + xfer += oprot->writeString((*_iter1493)); } xfer += oprot->writeListEnd(); } @@ -17918,10 +17918,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1452; - for (_iter1452 = (*(this->group_names)).begin(); _iter1452 != (*(this->group_names)).end(); ++_iter1452) + 
std::vector ::const_iterator _iter1494; + for (_iter1494 = (*(this->group_names)).begin(); _iter1494 != (*(this->group_names)).end(); ++_iter1494) { - xfer += oprot->writeString((*_iter1452)); + xfer += oprot->writeString((*_iter1494)); } xfer += oprot->writeListEnd(); } @@ -17962,14 +17962,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1453; - ::apache::thrift::protocol::TType _etype1456; - xfer += iprot->readListBegin(_etype1456, _size1453); - this->success.resize(_size1453); - uint32_t _i1457; - for (_i1457 = 0; _i1457 < _size1453; ++_i1457) + uint32_t _size1495; + ::apache::thrift::protocol::TType _etype1498; + xfer += iprot->readListBegin(_etype1498, _size1495); + this->success.resize(_size1495); + uint32_t _i1499; + for (_i1499 = 0; _i1499 < _size1495; ++_i1499) { - xfer += this->success[_i1457].read(iprot); + xfer += this->success[_i1499].read(iprot); } xfer += iprot->readListEnd(); } @@ -18016,10 +18016,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1458; - for (_iter1458 = this->success.begin(); _iter1458 != this->success.end(); ++_iter1458) + std::vector ::const_iterator _iter1500; + for (_iter1500 = this->success.begin(); _iter1500 != this->success.end(); ++_iter1500) { - xfer += (*_iter1458).write(oprot); + xfer += (*_iter1500).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18068,14 +18068,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1459; - ::apache::thrift::protocol::TType _etype1462; - xfer += 
iprot->readListBegin(_etype1462, _size1459); - (*(this->success)).resize(_size1459); - uint32_t _i1463; - for (_i1463 = 0; _i1463 < _size1459; ++_i1463) + uint32_t _size1501; + ::apache::thrift::protocol::TType _etype1504; + xfer += iprot->readListBegin(_etype1504, _size1501); + (*(this->success)).resize(_size1501); + uint32_t _i1505; + for (_i1505 = 0; _i1505 < _size1501; ++_i1505) { - xfer += (*(this->success))[_i1463].read(iprot); + xfer += (*(this->success))[_i1505].read(iprot); } xfer += iprot->readListEnd(); } @@ -18158,14 +18158,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1464; - ::apache::thrift::protocol::TType _etype1467; - xfer += iprot->readListBegin(_etype1467, _size1464); - this->part_vals.resize(_size1464); - uint32_t _i1468; - for (_i1468 = 0; _i1468 < _size1464; ++_i1468) + uint32_t _size1506; + ::apache::thrift::protocol::TType _etype1509; + xfer += iprot->readListBegin(_etype1509, _size1506); + this->part_vals.resize(_size1506); + uint32_t _i1510; + for (_i1510 = 0; _i1510 < _size1506; ++_i1510) { - xfer += iprot->readString(this->part_vals[_i1468]); + xfer += iprot->readString(this->part_vals[_i1510]); } xfer += iprot->readListEnd(); } @@ -18210,10 +18210,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1469; - for (_iter1469 = this->part_vals.begin(); _iter1469 != this->part_vals.end(); ++_iter1469) + std::vector ::const_iterator _iter1511; + for (_iter1511 = this->part_vals.begin(); _iter1511 != this->part_vals.end(); ++_iter1511) { - xfer += oprot->writeString((*_iter1469)); + xfer += oprot->writeString((*_iter1511)); } xfer += 
oprot->writeListEnd(); } @@ -18249,10 +18249,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1470; - for (_iter1470 = (*(this->part_vals)).begin(); _iter1470 != (*(this->part_vals)).end(); ++_iter1470) + std::vector ::const_iterator _iter1512; + for (_iter1512 = (*(this->part_vals)).begin(); _iter1512 != (*(this->part_vals)).end(); ++_iter1512) { - xfer += oprot->writeString((*_iter1470)); + xfer += oprot->writeString((*_iter1512)); } xfer += oprot->writeListEnd(); } @@ -18297,14 +18297,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1471; - ::apache::thrift::protocol::TType _etype1474; - xfer += iprot->readListBegin(_etype1474, _size1471); - this->success.resize(_size1471); - uint32_t _i1475; - for (_i1475 = 0; _i1475 < _size1471; ++_i1475) + uint32_t _size1513; + ::apache::thrift::protocol::TType _etype1516; + xfer += iprot->readListBegin(_etype1516, _size1513); + this->success.resize(_size1513); + uint32_t _i1517; + for (_i1517 = 0; _i1517 < _size1513; ++_i1517) { - xfer += iprot->readString(this->success[_i1475]); + xfer += iprot->readString(this->success[_i1517]); } xfer += iprot->readListEnd(); } @@ -18351,10 +18351,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1476; - for (_iter1476 = this->success.begin(); _iter1476 != this->success.end(); ++_iter1476) + std::vector ::const_iterator _iter1518; + for 
(_iter1518 = this->success.begin(); _iter1518 != this->success.end(); ++_iter1518) { - xfer += oprot->writeString((*_iter1476)); + xfer += oprot->writeString((*_iter1518)); } xfer += oprot->writeListEnd(); } @@ -18403,14 +18403,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1477; - ::apache::thrift::protocol::TType _etype1480; - xfer += iprot->readListBegin(_etype1480, _size1477); - (*(this->success)).resize(_size1477); - uint32_t _i1481; - for (_i1481 = 0; _i1481 < _size1477; ++_i1481) + uint32_t _size1519; + ::apache::thrift::protocol::TType _etype1522; + xfer += iprot->readListBegin(_etype1522, _size1519); + (*(this->success)).resize(_size1519); + uint32_t _i1523; + for (_i1523 = 0; _i1523 < _size1519; ++_i1523) { - xfer += iprot->readString((*(this->success))[_i1481]); + xfer += iprot->readString((*(this->success))[_i1523]); } xfer += iprot->readListEnd(); } @@ -18604,14 +18604,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1482; - ::apache::thrift::protocol::TType _etype1485; - xfer += iprot->readListBegin(_etype1485, _size1482); - this->success.resize(_size1482); - uint32_t _i1486; - for (_i1486 = 0; _i1486 < _size1482; ++_i1486) + uint32_t _size1524; + ::apache::thrift::protocol::TType _etype1527; + xfer += iprot->readListBegin(_etype1527, _size1524); + this->success.resize(_size1524); + uint32_t _i1528; + for (_i1528 = 0; _i1528 < _size1524; ++_i1528) { - xfer += this->success[_i1486].read(iprot); + xfer += this->success[_i1528].read(iprot); } xfer += iprot->readListEnd(); } @@ -18658,10 +18658,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1487; - for (_iter1487 = this->success.begin(); _iter1487 != this->success.end(); ++_iter1487) + std::vector ::const_iterator _iter1529; + for (_iter1529 = this->success.begin(); _iter1529 != this->success.end(); ++_iter1529) { - xfer += (*_iter1487).write(oprot); + xfer += (*_iter1529).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18710,14 +18710,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1488; - ::apache::thrift::protocol::TType _etype1491; - xfer += iprot->readListBegin(_etype1491, _size1488); - (*(this->success)).resize(_size1488); - uint32_t _i1492; - for (_i1492 = 0; _i1492 < _size1488; ++_i1492) + uint32_t _size1530; + ::apache::thrift::protocol::TType _etype1533; + xfer += iprot->readListBegin(_etype1533, _size1530); + (*(this->success)).resize(_size1530); + uint32_t _i1534; + for (_i1534 = 0; _i1534 < _size1530; ++_i1534) { - xfer += (*(this->success))[_i1492].read(iprot); + xfer += (*(this->success))[_i1534].read(iprot); } xfer += iprot->readListEnd(); } @@ -18911,14 +18911,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1493; - ::apache::thrift::protocol::TType _etype1496; - xfer += iprot->readListBegin(_etype1496, _size1493); - this->success.resize(_size1493); - uint32_t _i1497; - for (_i1497 = 0; _i1497 < _size1493; ++_i1497) + uint32_t _size1535; + ::apache::thrift::protocol::TType _etype1538; + xfer += iprot->readListBegin(_etype1538, _size1535); + this->success.resize(_size1535); + uint32_t _i1539; + for (_i1539 = 0; _i1539 < _size1535; ++_i1539) { - xfer += this->success[_i1497].read(iprot); + xfer += this->success[_i1539].read(iprot); } xfer += 
iprot->readListEnd(); } @@ -18965,10 +18965,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1498; - for (_iter1498 = this->success.begin(); _iter1498 != this->success.end(); ++_iter1498) + std::vector ::const_iterator _iter1540; + for (_iter1540 = this->success.begin(); _iter1540 != this->success.end(); ++_iter1540) { - xfer += (*_iter1498).write(oprot); + xfer += (*_iter1540).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19017,14 +19017,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1499; - ::apache::thrift::protocol::TType _etype1502; - xfer += iprot->readListBegin(_etype1502, _size1499); - (*(this->success)).resize(_size1499); - uint32_t _i1503; - for (_i1503 = 0; _i1503 < _size1499; ++_i1503) + uint32_t _size1541; + ::apache::thrift::protocol::TType _etype1544; + xfer += iprot->readListBegin(_etype1544, _size1541); + (*(this->success)).resize(_size1541); + uint32_t _i1545; + for (_i1545 = 0; _i1545 < _size1541; ++_i1545) { - xfer += (*(this->success))[_i1503].read(iprot); + xfer += (*(this->success))[_i1545].read(iprot); } xfer += iprot->readListEnd(); } @@ -19593,14 +19593,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1504; - ::apache::thrift::protocol::TType _etype1507; - xfer += iprot->readListBegin(_etype1507, _size1504); - this->names.resize(_size1504); - uint32_t _i1508; - for (_i1508 = 0; _i1508 < _size1504; ++_i1508) + uint32_t _size1546; + ::apache::thrift::protocol::TType _etype1549; + xfer += 
iprot->readListBegin(_etype1549, _size1546); + this->names.resize(_size1546); + uint32_t _i1550; + for (_i1550 = 0; _i1550 < _size1546; ++_i1550) { - xfer += iprot->readString(this->names[_i1508]); + xfer += iprot->readString(this->names[_i1550]); } xfer += iprot->readListEnd(); } @@ -19637,10 +19637,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1509; - for (_iter1509 = this->names.begin(); _iter1509 != this->names.end(); ++_iter1509) + std::vector ::const_iterator _iter1551; + for (_iter1551 = this->names.begin(); _iter1551 != this->names.end(); ++_iter1551) { - xfer += oprot->writeString((*_iter1509)); + xfer += oprot->writeString((*_iter1551)); } xfer += oprot->writeListEnd(); } @@ -19672,10 +19672,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1510; - for (_iter1510 = (*(this->names)).begin(); _iter1510 != (*(this->names)).end(); ++_iter1510) + std::vector ::const_iterator _iter1552; + for (_iter1552 = (*(this->names)).begin(); _iter1552 != (*(this->names)).end(); ++_iter1552) { - xfer += oprot->writeString((*_iter1510)); + xfer += oprot->writeString((*_iter1552)); } xfer += oprot->writeListEnd(); } @@ -19716,14 +19716,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1511; - ::apache::thrift::protocol::TType _etype1514; - xfer += iprot->readListBegin(_etype1514, _size1511); - this->success.resize(_size1511); - 
uint32_t _i1515; - for (_i1515 = 0; _i1515 < _size1511; ++_i1515) + uint32_t _size1553; + ::apache::thrift::protocol::TType _etype1556; + xfer += iprot->readListBegin(_etype1556, _size1553); + this->success.resize(_size1553); + uint32_t _i1557; + for (_i1557 = 0; _i1557 < _size1553; ++_i1557) { - xfer += this->success[_i1515].read(iprot); + xfer += this->success[_i1557].read(iprot); } xfer += iprot->readListEnd(); } @@ -19770,10 +19770,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1516; - for (_iter1516 = this->success.begin(); _iter1516 != this->success.end(); ++_iter1516) + std::vector ::const_iterator _iter1558; + for (_iter1558 = this->success.begin(); _iter1558 != this->success.end(); ++_iter1558) { - xfer += (*_iter1516).write(oprot); + xfer += (*_iter1558).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19822,14 +19822,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1517; - ::apache::thrift::protocol::TType _etype1520; - xfer += iprot->readListBegin(_etype1520, _size1517); - (*(this->success)).resize(_size1517); - uint32_t _i1521; - for (_i1521 = 0; _i1521 < _size1517; ++_i1521) + uint32_t _size1559; + ::apache::thrift::protocol::TType _etype1562; + xfer += iprot->readListBegin(_etype1562, _size1559); + (*(this->success)).resize(_size1559); + uint32_t _i1563; + for (_i1563 = 0; _i1563 < _size1559; ++_i1563) { - xfer += (*(this->success))[_i1521].read(iprot); + xfer += (*(this->success))[_i1563].read(iprot); } xfer += iprot->readListEnd(); } @@ -20151,14 +20151,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1522; - ::apache::thrift::protocol::TType _etype1525; - xfer += iprot->readListBegin(_etype1525, _size1522); - this->new_parts.resize(_size1522); - uint32_t _i1526; - for (_i1526 = 0; _i1526 < _size1522; ++_i1526) + uint32_t _size1564; + ::apache::thrift::protocol::TType _etype1567; + xfer += iprot->readListBegin(_etype1567, _size1564); + this->new_parts.resize(_size1564); + uint32_t _i1568; + for (_i1568 = 0; _i1568 < _size1564; ++_i1568) { - xfer += this->new_parts[_i1526].read(iprot); + xfer += this->new_parts[_i1568].read(iprot); } xfer += iprot->readListEnd(); } @@ -20195,10 +20195,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1527; - for (_iter1527 = this->new_parts.begin(); _iter1527 != this->new_parts.end(); ++_iter1527) + std::vector ::const_iterator _iter1569; + for (_iter1569 = this->new_parts.begin(); _iter1569 != this->new_parts.end(); ++_iter1569) { - xfer += (*_iter1527).write(oprot); + xfer += (*_iter1569).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20230,10 +20230,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1528; - for (_iter1528 = (*(this->new_parts)).begin(); _iter1528 != (*(this->new_parts)).end(); ++_iter1528) + std::vector ::const_iterator _iter1570; + for (_iter1570 = (*(this->new_parts)).begin(); _iter1570 != (*(this->new_parts)).end(); ++_iter1570) { - xfer += (*_iter1528).write(oprot); + xfer += 
(*_iter1570).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20418,14 +20418,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1529; - ::apache::thrift::protocol::TType _etype1532; - xfer += iprot->readListBegin(_etype1532, _size1529); - this->new_parts.resize(_size1529); - uint32_t _i1533; - for (_i1533 = 0; _i1533 < _size1529; ++_i1533) + uint32_t _size1571; + ::apache::thrift::protocol::TType _etype1574; + xfer += iprot->readListBegin(_etype1574, _size1571); + this->new_parts.resize(_size1571); + uint32_t _i1575; + for (_i1575 = 0; _i1575 < _size1571; ++_i1575) { - xfer += this->new_parts[_i1533].read(iprot); + xfer += this->new_parts[_i1575].read(iprot); } xfer += iprot->readListEnd(); } @@ -20470,10 +20470,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1534; - for (_iter1534 = this->new_parts.begin(); _iter1534 != this->new_parts.end(); ++_iter1534) + std::vector ::const_iterator _iter1576; + for (_iter1576 = this->new_parts.begin(); _iter1576 != this->new_parts.end(); ++_iter1576) { - xfer += (*_iter1534).write(oprot); + xfer += (*_iter1576).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20509,10 +20509,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1535; - for (_iter1535 = (*(this->new_parts)).begin(); _iter1535 != (*(this->new_parts)).end(); ++_iter1535) + std::vector ::const_iterator 
_iter1577; + for (_iter1577 = (*(this->new_parts)).begin(); _iter1577 != (*(this->new_parts)).end(); ++_iter1577) { - xfer += (*_iter1535).write(oprot); + xfer += (*_iter1577).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20956,14 +20956,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1536; - ::apache::thrift::protocol::TType _etype1539; - xfer += iprot->readListBegin(_etype1539, _size1536); - this->part_vals.resize(_size1536); - uint32_t _i1540; - for (_i1540 = 0; _i1540 < _size1536; ++_i1540) + uint32_t _size1578; + ::apache::thrift::protocol::TType _etype1581; + xfer += iprot->readListBegin(_etype1581, _size1578); + this->part_vals.resize(_size1578); + uint32_t _i1582; + for (_i1582 = 0; _i1582 < _size1578; ++_i1582) { - xfer += iprot->readString(this->part_vals[_i1540]); + xfer += iprot->readString(this->part_vals[_i1582]); } xfer += iprot->readListEnd(); } @@ -21008,10 +21008,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1541; - for (_iter1541 = this->part_vals.begin(); _iter1541 != this->part_vals.end(); ++_iter1541) + std::vector ::const_iterator _iter1583; + for (_iter1583 = this->part_vals.begin(); _iter1583 != this->part_vals.end(); ++_iter1583) { - xfer += oprot->writeString((*_iter1541)); + xfer += oprot->writeString((*_iter1583)); } xfer += oprot->writeListEnd(); } @@ -21047,10 +21047,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1542; - for (_iter1542 = (*(this->part_vals)).begin(); _iter1542 != (*(this->part_vals)).end(); ++_iter1542) + std::vector ::const_iterator _iter1584; + for (_iter1584 = (*(this->part_vals)).begin(); _iter1584 != (*(this->part_vals)).end(); ++_iter1584) { - xfer += oprot->writeString((*_iter1542)); + xfer += oprot->writeString((*_iter1584)); } xfer += oprot->writeListEnd(); } @@ -21223,14 +21223,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1543; - ::apache::thrift::protocol::TType _etype1546; - xfer += iprot->readListBegin(_etype1546, _size1543); - this->part_vals.resize(_size1543); - uint32_t _i1547; - for (_i1547 = 0; _i1547 < _size1543; ++_i1547) + uint32_t _size1585; + ::apache::thrift::protocol::TType _etype1588; + xfer += iprot->readListBegin(_etype1588, _size1585); + this->part_vals.resize(_size1585); + uint32_t _i1589; + for (_i1589 = 0; _i1589 < _size1585; ++_i1589) { - xfer += iprot->readString(this->part_vals[_i1547]); + xfer += iprot->readString(this->part_vals[_i1589]); } xfer += iprot->readListEnd(); } @@ -21267,10 +21267,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1548; - for (_iter1548 = this->part_vals.begin(); _iter1548 != this->part_vals.end(); ++_iter1548) + std::vector ::const_iterator _iter1590; + for (_iter1590 = this->part_vals.begin(); _iter1590 != this->part_vals.end(); ++_iter1590) { - xfer += oprot->writeString((*_iter1548)); + xfer += oprot->writeString((*_iter1590)); } xfer += oprot->writeListEnd(); } @@ -21298,10 +21298,10 @@ uint32_t 
ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1549; - for (_iter1549 = (*(this->part_vals)).begin(); _iter1549 != (*(this->part_vals)).end(); ++_iter1549) + std::vector ::const_iterator _iter1591; + for (_iter1591 = (*(this->part_vals)).begin(); _iter1591 != (*(this->part_vals)).end(); ++_iter1591) { - xfer += oprot->writeString((*_iter1549)); + xfer += oprot->writeString((*_iter1591)); } xfer += oprot->writeListEnd(); } @@ -21776,14 +21776,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1550; - ::apache::thrift::protocol::TType _etype1553; - xfer += iprot->readListBegin(_etype1553, _size1550); - this->success.resize(_size1550); - uint32_t _i1554; - for (_i1554 = 0; _i1554 < _size1550; ++_i1554) + uint32_t _size1592; + ::apache::thrift::protocol::TType _etype1595; + xfer += iprot->readListBegin(_etype1595, _size1592); + this->success.resize(_size1592); + uint32_t _i1596; + for (_i1596 = 0; _i1596 < _size1592; ++_i1596) { - xfer += iprot->readString(this->success[_i1554]); + xfer += iprot->readString(this->success[_i1596]); } xfer += iprot->readListEnd(); } @@ -21822,10 +21822,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1555; - for (_iter1555 = this->success.begin(); _iter1555 != this->success.end(); ++_iter1555) + std::vector ::const_iterator _iter1597; + for (_iter1597 = this->success.begin(); _iter1597 != 
this->success.end(); ++_iter1597) { - xfer += oprot->writeString((*_iter1555)); + xfer += oprot->writeString((*_iter1597)); } xfer += oprot->writeListEnd(); } @@ -21870,14 +21870,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1556; - ::apache::thrift::protocol::TType _etype1559; - xfer += iprot->readListBegin(_etype1559, _size1556); - (*(this->success)).resize(_size1556); - uint32_t _i1560; - for (_i1560 = 0; _i1560 < _size1556; ++_i1560) + uint32_t _size1598; + ::apache::thrift::protocol::TType _etype1601; + xfer += iprot->readListBegin(_etype1601, _size1598); + (*(this->success)).resize(_size1598); + uint32_t _i1602; + for (_i1602 = 0; _i1602 < _size1598; ++_i1602) { - xfer += iprot->readString((*(this->success))[_i1560]); + xfer += iprot->readString((*(this->success))[_i1602]); } xfer += iprot->readListEnd(); } @@ -22015,17 +22015,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1561; - ::apache::thrift::protocol::TType _ktype1562; - ::apache::thrift::protocol::TType _vtype1563; - xfer += iprot->readMapBegin(_ktype1562, _vtype1563, _size1561); - uint32_t _i1565; - for (_i1565 = 0; _i1565 < _size1561; ++_i1565) + uint32_t _size1603; + ::apache::thrift::protocol::TType _ktype1604; + ::apache::thrift::protocol::TType _vtype1605; + xfer += iprot->readMapBegin(_ktype1604, _vtype1605, _size1603); + uint32_t _i1607; + for (_i1607 = 0; _i1607 < _size1603; ++_i1607) { - std::string _key1566; - xfer += iprot->readString(_key1566); - std::string& _val1567 = this->success[_key1566]; - xfer += iprot->readString(_val1567); + std::string _key1608; + xfer += iprot->readString(_key1608); + std::string& _val1609 = this->success[_key1608]; + xfer += iprot->readString(_val1609); } xfer += iprot->readMapEnd(); } @@ 
-22064,11 +22064,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1568; - for (_iter1568 = this->success.begin(); _iter1568 != this->success.end(); ++_iter1568) + std::map ::const_iterator _iter1610; + for (_iter1610 = this->success.begin(); _iter1610 != this->success.end(); ++_iter1610) { - xfer += oprot->writeString(_iter1568->first); - xfer += oprot->writeString(_iter1568->second); + xfer += oprot->writeString(_iter1610->first); + xfer += oprot->writeString(_iter1610->second); } xfer += oprot->writeMapEnd(); } @@ -22113,17 +22113,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1569; - ::apache::thrift::protocol::TType _ktype1570; - ::apache::thrift::protocol::TType _vtype1571; - xfer += iprot->readMapBegin(_ktype1570, _vtype1571, _size1569); - uint32_t _i1573; - for (_i1573 = 0; _i1573 < _size1569; ++_i1573) + uint32_t _size1611; + ::apache::thrift::protocol::TType _ktype1612; + ::apache::thrift::protocol::TType _vtype1613; + xfer += iprot->readMapBegin(_ktype1612, _vtype1613, _size1611); + uint32_t _i1615; + for (_i1615 = 0; _i1615 < _size1611; ++_i1615) { - std::string _key1574; - xfer += iprot->readString(_key1574); - std::string& _val1575 = (*(this->success))[_key1574]; - xfer += iprot->readString(_val1575); + std::string _key1616; + xfer += iprot->readString(_key1616); + std::string& _val1617 = (*(this->success))[_key1616]; + xfer += iprot->readString(_val1617); } xfer += iprot->readMapEnd(); } @@ -22198,17 +22198,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == 
::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1576; - ::apache::thrift::protocol::TType _ktype1577; - ::apache::thrift::protocol::TType _vtype1578; - xfer += iprot->readMapBegin(_ktype1577, _vtype1578, _size1576); - uint32_t _i1580; - for (_i1580 = 0; _i1580 < _size1576; ++_i1580) + uint32_t _size1618; + ::apache::thrift::protocol::TType _ktype1619; + ::apache::thrift::protocol::TType _vtype1620; + xfer += iprot->readMapBegin(_ktype1619, _vtype1620, _size1618); + uint32_t _i1622; + for (_i1622 = 0; _i1622 < _size1618; ++_i1622) { - std::string _key1581; - xfer += iprot->readString(_key1581); - std::string& _val1582 = this->part_vals[_key1581]; - xfer += iprot->readString(_val1582); + std::string _key1623; + xfer += iprot->readString(_key1623); + std::string& _val1624 = this->part_vals[_key1623]; + xfer += iprot->readString(_val1624); } xfer += iprot->readMapEnd(); } @@ -22219,9 +22219,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1583; - xfer += iprot->readI32(ecast1583); - this->eventType = (PartitionEventType::type)ecast1583; + int32_t ecast1625; + xfer += iprot->readI32(ecast1625); + this->eventType = (PartitionEventType::type)ecast1625; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22255,11 +22255,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1584; - for (_iter1584 = this->part_vals.begin(); _iter1584 != this->part_vals.end(); ++_iter1584) + std::map ::const_iterator _iter1626; + for (_iter1626 = this->part_vals.begin(); _iter1626 != this->part_vals.end(); ++_iter1626) { - xfer += 
oprot->writeString(_iter1584->first); - xfer += oprot->writeString(_iter1584->second); + xfer += oprot->writeString(_iter1626->first); + xfer += oprot->writeString(_iter1626->second); } xfer += oprot->writeMapEnd(); } @@ -22295,11 +22295,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1585; - for (_iter1585 = (*(this->part_vals)).begin(); _iter1585 != (*(this->part_vals)).end(); ++_iter1585) + std::map ::const_iterator _iter1627; + for (_iter1627 = (*(this->part_vals)).begin(); _iter1627 != (*(this->part_vals)).end(); ++_iter1627) { - xfer += oprot->writeString(_iter1585->first); - xfer += oprot->writeString(_iter1585->second); + xfer += oprot->writeString(_iter1627->first); + xfer += oprot->writeString(_iter1627->second); } xfer += oprot->writeMapEnd(); } @@ -22568,17 +22568,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1586; - ::apache::thrift::protocol::TType _ktype1587; - ::apache::thrift::protocol::TType _vtype1588; - xfer += iprot->readMapBegin(_ktype1587, _vtype1588, _size1586); - uint32_t _i1590; - for (_i1590 = 0; _i1590 < _size1586; ++_i1590) + uint32_t _size1628; + ::apache::thrift::protocol::TType _ktype1629; + ::apache::thrift::protocol::TType _vtype1630; + xfer += iprot->readMapBegin(_ktype1629, _vtype1630, _size1628); + uint32_t _i1632; + for (_i1632 = 0; _i1632 < _size1628; ++_i1632) { - std::string _key1591; - xfer += iprot->readString(_key1591); - std::string& _val1592 = this->part_vals[_key1591]; - xfer += iprot->readString(_val1592); + std::string _key1633; + xfer += iprot->readString(_key1633); + std::string& 
_val1634 = this->part_vals[_key1633]; + xfer += iprot->readString(_val1634); } xfer += iprot->readMapEnd(); } @@ -22589,9 +22589,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1593; - xfer += iprot->readI32(ecast1593); - this->eventType = (PartitionEventType::type)ecast1593; + int32_t ecast1635; + xfer += iprot->readI32(ecast1635); + this->eventType = (PartitionEventType::type)ecast1635; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22625,11 +22625,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1594; - for (_iter1594 = this->part_vals.begin(); _iter1594 != this->part_vals.end(); ++_iter1594) + std::map ::const_iterator _iter1636; + for (_iter1636 = this->part_vals.begin(); _iter1636 != this->part_vals.end(); ++_iter1636) { - xfer += oprot->writeString(_iter1594->first); - xfer += oprot->writeString(_iter1594->second); + xfer += oprot->writeString(_iter1636->first); + xfer += oprot->writeString(_iter1636->second); } xfer += oprot->writeMapEnd(); } @@ -22665,11 +22665,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1595; - for (_iter1595 = (*(this->part_vals)).begin(); _iter1595 != (*(this->part_vals)).end(); ++_iter1595) + std::map ::const_iterator _iter1637; + for (_iter1637 = (*(this->part_vals)).begin(); _iter1637 != 
(*(this->part_vals)).end(); ++_iter1637) { - xfer += oprot->writeString(_iter1595->first); - xfer += oprot->writeString(_iter1595->second); + xfer += oprot->writeString(_iter1637->first); + xfer += oprot->writeString(_iter1637->second); } xfer += oprot->writeMapEnd(); } @@ -24105,14 +24105,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1596; - ::apache::thrift::protocol::TType _etype1599; - xfer += iprot->readListBegin(_etype1599, _size1596); - this->success.resize(_size1596); - uint32_t _i1600; - for (_i1600 = 0; _i1600 < _size1596; ++_i1600) + uint32_t _size1638; + ::apache::thrift::protocol::TType _etype1641; + xfer += iprot->readListBegin(_etype1641, _size1638); + this->success.resize(_size1638); + uint32_t _i1642; + for (_i1642 = 0; _i1642 < _size1638; ++_i1642) { - xfer += this->success[_i1600].read(iprot); + xfer += this->success[_i1642].read(iprot); } xfer += iprot->readListEnd(); } @@ -24159,10 +24159,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1601; - for (_iter1601 = this->success.begin(); _iter1601 != this->success.end(); ++_iter1601) + std::vector ::const_iterator _iter1643; + for (_iter1643 = this->success.begin(); _iter1643 != this->success.end(); ++_iter1643) { - xfer += (*_iter1601).write(oprot); + xfer += (*_iter1643).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24211,14 +24211,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1602; - ::apache::thrift::protocol::TType _etype1605; - xfer += 
iprot->readListBegin(_etype1605, _size1602); - (*(this->success)).resize(_size1602); - uint32_t _i1606; - for (_i1606 = 0; _i1606 < _size1602; ++_i1606) + uint32_t _size1644; + ::apache::thrift::protocol::TType _etype1647; + xfer += iprot->readListBegin(_etype1647, _size1644); + (*(this->success)).resize(_size1644); + uint32_t _i1648; + for (_i1648 = 0; _i1648 < _size1644; ++_i1648) { - xfer += (*(this->success))[_i1606].read(iprot); + xfer += (*(this->success))[_i1648].read(iprot); } xfer += iprot->readListEnd(); } @@ -24396,14 +24396,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1607; - ::apache::thrift::protocol::TType _etype1610; - xfer += iprot->readListBegin(_etype1610, _size1607); - this->success.resize(_size1607); - uint32_t _i1611; - for (_i1611 = 0; _i1611 < _size1607; ++_i1611) + uint32_t _size1649; + ::apache::thrift::protocol::TType _etype1652; + xfer += iprot->readListBegin(_etype1652, _size1649); + this->success.resize(_size1649); + uint32_t _i1653; + for (_i1653 = 0; _i1653 < _size1649; ++_i1653) { - xfer += iprot->readString(this->success[_i1611]); + xfer += iprot->readString(this->success[_i1653]); } xfer += iprot->readListEnd(); } @@ -24442,10 +24442,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1612; - for (_iter1612 = this->success.begin(); _iter1612 != this->success.end(); ++_iter1612) + std::vector ::const_iterator _iter1654; + for (_iter1654 = this->success.begin(); _iter1654 != this->success.end(); ++_iter1654) { - xfer += oprot->writeString((*_iter1612)); + xfer += oprot->writeString((*_iter1654)); } xfer += oprot->writeListEnd(); } @@ -24490,14 
+24490,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1613; - ::apache::thrift::protocol::TType _etype1616; - xfer += iprot->readListBegin(_etype1616, _size1613); - (*(this->success)).resize(_size1613); - uint32_t _i1617; - for (_i1617 = 0; _i1617 < _size1613; ++_i1617) + uint32_t _size1655; + ::apache::thrift::protocol::TType _etype1658; + xfer += iprot->readListBegin(_etype1658, _size1655); + (*(this->success)).resize(_size1655); + uint32_t _i1659; + for (_i1659 = 0; _i1659 < _size1655; ++_i1659) { - xfer += iprot->readString((*(this->success))[_i1617]); + xfer += iprot->readString((*(this->success))[_i1659]); } xfer += iprot->readListEnd(); } @@ -28978,14 +28978,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1618; - ::apache::thrift::protocol::TType _etype1621; - xfer += iprot->readListBegin(_etype1621, _size1618); - this->success.resize(_size1618); - uint32_t _i1622; - for (_i1622 = 0; _i1622 < _size1618; ++_i1622) + uint32_t _size1660; + ::apache::thrift::protocol::TType _etype1663; + xfer += iprot->readListBegin(_etype1663, _size1660); + this->success.resize(_size1660); + uint32_t _i1664; + for (_i1664 = 0; _i1664 < _size1660; ++_i1664) { - xfer += iprot->readString(this->success[_i1622]); + xfer += iprot->readString(this->success[_i1664]); } xfer += iprot->readListEnd(); } @@ -29024,10 +29024,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1623; - for (_iter1623 = this->success.begin(); _iter1623 != this->success.end(); ++_iter1623) + 
std::vector ::const_iterator _iter1665; + for (_iter1665 = this->success.begin(); _iter1665 != this->success.end(); ++_iter1665) { - xfer += oprot->writeString((*_iter1623)); + xfer += oprot->writeString((*_iter1665)); } xfer += oprot->writeListEnd(); } @@ -29072,14 +29072,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1624; - ::apache::thrift::protocol::TType _etype1627; - xfer += iprot->readListBegin(_etype1627, _size1624); - (*(this->success)).resize(_size1624); - uint32_t _i1628; - for (_i1628 = 0; _i1628 < _size1624; ++_i1628) + uint32_t _size1666; + ::apache::thrift::protocol::TType _etype1669; + xfer += iprot->readListBegin(_etype1669, _size1666); + (*(this->success)).resize(_size1666); + uint32_t _i1670; + for (_i1670 = 0; _i1670 < _size1666; ++_i1670) { - xfer += iprot->readString((*(this->success))[_i1628]); + xfer += iprot->readString((*(this->success))[_i1670]); } xfer += iprot->readListEnd(); } @@ -30039,14 +30039,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1629; - ::apache::thrift::protocol::TType _etype1632; - xfer += iprot->readListBegin(_etype1632, _size1629); - this->success.resize(_size1629); - uint32_t _i1633; - for (_i1633 = 0; _i1633 < _size1629; ++_i1633) + uint32_t _size1671; + ::apache::thrift::protocol::TType _etype1674; + xfer += iprot->readListBegin(_etype1674, _size1671); + this->success.resize(_size1671); + uint32_t _i1675; + for (_i1675 = 0; _i1675 < _size1671; ++_i1675) { - xfer += iprot->readString(this->success[_i1633]); + xfer += iprot->readString(this->success[_i1675]); } xfer += iprot->readListEnd(); } @@ -30085,10 +30085,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1634; - for (_iter1634 = this->success.begin(); _iter1634 != this->success.end(); ++_iter1634) + std::vector ::const_iterator _iter1676; + for (_iter1676 = this->success.begin(); _iter1676 != this->success.end(); ++_iter1676) { - xfer += oprot->writeString((*_iter1634)); + xfer += oprot->writeString((*_iter1676)); } xfer += oprot->writeListEnd(); } @@ -30133,14 +30133,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1635; - ::apache::thrift::protocol::TType _etype1638; - xfer += iprot->readListBegin(_etype1638, _size1635); - (*(this->success)).resize(_size1635); - uint32_t _i1639; - for (_i1639 = 0; _i1639 < _size1635; ++_i1639) + uint32_t _size1677; + ::apache::thrift::protocol::TType _etype1680; + xfer += iprot->readListBegin(_etype1680, _size1677); + (*(this->success)).resize(_size1677); + uint32_t _i1681; + for (_i1681 = 0; _i1681 < _size1677; ++_i1681) { - xfer += iprot->readString((*(this->success))[_i1639]); + xfer += iprot->readString((*(this->success))[_i1681]); } xfer += iprot->readListEnd(); } @@ -30213,9 +30213,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1640; - xfer += iprot->readI32(ecast1640); - this->principal_type = (PrincipalType::type)ecast1640; + int32_t ecast1682; + xfer += iprot->readI32(ecast1682); + this->principal_type = (PrincipalType::type)ecast1682; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30231,9 +30231,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t 
ecast1641; - xfer += iprot->readI32(ecast1641); - this->grantorType = (PrincipalType::type)ecast1641; + int32_t ecast1683; + xfer += iprot->readI32(ecast1683); + this->grantorType = (PrincipalType::type)ecast1683; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -30504,9 +30504,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1642; - xfer += iprot->readI32(ecast1642); - this->principal_type = (PrincipalType::type)ecast1642; + int32_t ecast1684; + xfer += iprot->readI32(ecast1684); + this->principal_type = (PrincipalType::type)ecast1684; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30737,9 +30737,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1643; - xfer += iprot->readI32(ecast1643); - this->principal_type = (PrincipalType::type)ecast1643; + int32_t ecast1685; + xfer += iprot->readI32(ecast1685); + this->principal_type = (PrincipalType::type)ecast1685; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30828,14 +30828,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1644; - ::apache::thrift::protocol::TType _etype1647; - xfer += iprot->readListBegin(_etype1647, _size1644); - this->success.resize(_size1644); - uint32_t _i1648; - for (_i1648 = 0; _i1648 < _size1644; ++_i1648) + uint32_t _size1686; + ::apache::thrift::protocol::TType _etype1689; + xfer += iprot->readListBegin(_etype1689, _size1686); + this->success.resize(_size1686); + uint32_t _i1690; + for (_i1690 = 0; _i1690 < _size1686; ++_i1690) { - xfer += this->success[_i1648].read(iprot); + xfer += this->success[_i1690].read(iprot); } xfer += iprot->readListEnd(); } 
@@ -30874,10 +30874,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1649; - for (_iter1649 = this->success.begin(); _iter1649 != this->success.end(); ++_iter1649) + std::vector ::const_iterator _iter1691; + for (_iter1691 = this->success.begin(); _iter1691 != this->success.end(); ++_iter1691) { - xfer += (*_iter1649).write(oprot); + xfer += (*_iter1691).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30922,14 +30922,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1650; - ::apache::thrift::protocol::TType _etype1653; - xfer += iprot->readListBegin(_etype1653, _size1650); - (*(this->success)).resize(_size1650); - uint32_t _i1654; - for (_i1654 = 0; _i1654 < _size1650; ++_i1654) + uint32_t _size1692; + ::apache::thrift::protocol::TType _etype1695; + xfer += iprot->readListBegin(_etype1695, _size1692); + (*(this->success)).resize(_size1692); + uint32_t _i1696; + for (_i1696 = 0; _i1696 < _size1692; ++_i1696) { - xfer += (*(this->success))[_i1654].read(iprot); + xfer += (*(this->success))[_i1696].read(iprot); } xfer += iprot->readListEnd(); } @@ -31625,14 +31625,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1655; - ::apache::thrift::protocol::TType _etype1658; - xfer += iprot->readListBegin(_etype1658, _size1655); - this->group_names.resize(_size1655); - uint32_t _i1659; - for (_i1659 = 0; _i1659 < _size1655; ++_i1659) + uint32_t _size1697; + ::apache::thrift::protocol::TType _etype1700; + xfer += iprot->readListBegin(_etype1700, 
_size1697); + this->group_names.resize(_size1697); + uint32_t _i1701; + for (_i1701 = 0; _i1701 < _size1697; ++_i1701) { - xfer += iprot->readString(this->group_names[_i1659]); + xfer += iprot->readString(this->group_names[_i1701]); } xfer += iprot->readListEnd(); } @@ -31669,10 +31669,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1660; - for (_iter1660 = this->group_names.begin(); _iter1660 != this->group_names.end(); ++_iter1660) + std::vector ::const_iterator _iter1702; + for (_iter1702 = this->group_names.begin(); _iter1702 != this->group_names.end(); ++_iter1702) { - xfer += oprot->writeString((*_iter1660)); + xfer += oprot->writeString((*_iter1702)); } xfer += oprot->writeListEnd(); } @@ -31704,10 +31704,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1661; - for (_iter1661 = (*(this->group_names)).begin(); _iter1661 != (*(this->group_names)).end(); ++_iter1661) + std::vector ::const_iterator _iter1703; + for (_iter1703 = (*(this->group_names)).begin(); _iter1703 != (*(this->group_names)).end(); ++_iter1703) { - xfer += oprot->writeString((*_iter1661)); + xfer += oprot->writeString((*_iter1703)); } xfer += oprot->writeListEnd(); } @@ -31882,9 +31882,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1662; - xfer += iprot->readI32(ecast1662); - this->principal_type = (PrincipalType::type)ecast1662; + int32_t 
ecast1704; + xfer += iprot->readI32(ecast1704); + this->principal_type = (PrincipalType::type)ecast1704; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31989,14 +31989,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1663; - ::apache::thrift::protocol::TType _etype1666; - xfer += iprot->readListBegin(_etype1666, _size1663); - this->success.resize(_size1663); - uint32_t _i1667; - for (_i1667 = 0; _i1667 < _size1663; ++_i1667) + uint32_t _size1705; + ::apache::thrift::protocol::TType _etype1708; + xfer += iprot->readListBegin(_etype1708, _size1705); + this->success.resize(_size1705); + uint32_t _i1709; + for (_i1709 = 0; _i1709 < _size1705; ++_i1709) { - xfer += this->success[_i1667].read(iprot); + xfer += this->success[_i1709].read(iprot); } xfer += iprot->readListEnd(); } @@ -32035,10 +32035,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1668; - for (_iter1668 = this->success.begin(); _iter1668 != this->success.end(); ++_iter1668) + std::vector ::const_iterator _iter1710; + for (_iter1710 = this->success.begin(); _iter1710 != this->success.end(); ++_iter1710) { - xfer += (*_iter1668).write(oprot); + xfer += (*_iter1710).write(oprot); } xfer += oprot->writeListEnd(); } @@ -32083,14 +32083,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1669; - ::apache::thrift::protocol::TType _etype1672; - xfer += iprot->readListBegin(_etype1672, _size1669); - (*(this->success)).resize(_size1669); - uint32_t _i1673; - for (_i1673 = 
0; _i1673 < _size1669; ++_i1673) + uint32_t _size1711; + ::apache::thrift::protocol::TType _etype1714; + xfer += iprot->readListBegin(_etype1714, _size1711); + (*(this->success)).resize(_size1711); + uint32_t _i1715; + for (_i1715 = 0; _i1715 < _size1711; ++_i1715) { - xfer += (*(this->success))[_i1673].read(iprot); + xfer += (*(this->success))[_i1715].read(iprot); } xfer += iprot->readListEnd(); } @@ -32778,14 +32778,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1674; - ::apache::thrift::protocol::TType _etype1677; - xfer += iprot->readListBegin(_etype1677, _size1674); - this->group_names.resize(_size1674); - uint32_t _i1678; - for (_i1678 = 0; _i1678 < _size1674; ++_i1678) + uint32_t _size1716; + ::apache::thrift::protocol::TType _etype1719; + xfer += iprot->readListBegin(_etype1719, _size1716); + this->group_names.resize(_size1716); + uint32_t _i1720; + for (_i1720 = 0; _i1720 < _size1716; ++_i1720) { - xfer += iprot->readString(this->group_names[_i1678]); + xfer += iprot->readString(this->group_names[_i1720]); } xfer += iprot->readListEnd(); } @@ -32818,10 +32818,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1679; - for (_iter1679 = this->group_names.begin(); _iter1679 != this->group_names.end(); ++_iter1679) + std::vector ::const_iterator _iter1721; + for (_iter1721 = this->group_names.begin(); _iter1721 != this->group_names.end(); ++_iter1721) { - xfer += oprot->writeString((*_iter1679)); + xfer += oprot->writeString((*_iter1721)); } xfer += oprot->writeListEnd(); } @@ -32849,10 +32849,10 @@ uint32_t 
ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1680; - for (_iter1680 = (*(this->group_names)).begin(); _iter1680 != (*(this->group_names)).end(); ++_iter1680) + std::vector ::const_iterator _iter1722; + for (_iter1722 = (*(this->group_names)).begin(); _iter1722 != (*(this->group_names)).end(); ++_iter1722) { - xfer += oprot->writeString((*_iter1680)); + xfer += oprot->writeString((*_iter1722)); } xfer += oprot->writeListEnd(); } @@ -32893,14 +32893,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1681; - ::apache::thrift::protocol::TType _etype1684; - xfer += iprot->readListBegin(_etype1684, _size1681); - this->success.resize(_size1681); - uint32_t _i1685; - for (_i1685 = 0; _i1685 < _size1681; ++_i1685) + uint32_t _size1723; + ::apache::thrift::protocol::TType _etype1726; + xfer += iprot->readListBegin(_etype1726, _size1723); + this->success.resize(_size1723); + uint32_t _i1727; + for (_i1727 = 0; _i1727 < _size1723; ++_i1727) { - xfer += iprot->readString(this->success[_i1685]); + xfer += iprot->readString(this->success[_i1727]); } xfer += iprot->readListEnd(); } @@ -32939,10 +32939,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1686; - for (_iter1686 = this->success.begin(); _iter1686 != this->success.end(); ++_iter1686) + std::vector ::const_iterator _iter1728; + for (_iter1728 = this->success.begin(); _iter1728 != 
this->success.end(); ++_iter1728) { - xfer += oprot->writeString((*_iter1686)); + xfer += oprot->writeString((*_iter1728)); } xfer += oprot->writeListEnd(); } @@ -32987,14 +32987,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1687; - ::apache::thrift::protocol::TType _etype1690; - xfer += iprot->readListBegin(_etype1690, _size1687); - (*(this->success)).resize(_size1687); - uint32_t _i1691; - for (_i1691 = 0; _i1691 < _size1687; ++_i1691) + uint32_t _size1729; + ::apache::thrift::protocol::TType _etype1732; + xfer += iprot->readListBegin(_etype1732, _size1729); + (*(this->success)).resize(_size1729); + uint32_t _i1733; + for (_i1733 = 0; _i1733 < _size1729; ++_i1733) { - xfer += iprot->readString((*(this->success))[_i1691]); + xfer += iprot->readString((*(this->success))[_i1733]); } xfer += iprot->readListEnd(); } @@ -34305,14 +34305,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1692; - ::apache::thrift::protocol::TType _etype1695; - xfer += iprot->readListBegin(_etype1695, _size1692); - this->success.resize(_size1692); - uint32_t _i1696; - for (_i1696 = 0; _i1696 < _size1692; ++_i1696) + uint32_t _size1734; + ::apache::thrift::protocol::TType _etype1737; + xfer += iprot->readListBegin(_etype1737, _size1734); + this->success.resize(_size1734); + uint32_t _i1738; + for (_i1738 = 0; _i1738 < _size1734; ++_i1738) { - xfer += iprot->readString(this->success[_i1696]); + xfer += iprot->readString(this->success[_i1738]); } xfer += iprot->readListEnd(); } @@ -34343,10 +34343,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1697; - for (_iter1697 = this->success.begin(); _iter1697 != this->success.end(); ++_iter1697) + std::vector ::const_iterator _iter1739; + for (_iter1739 = this->success.begin(); _iter1739 != this->success.end(); ++_iter1739) { - xfer += oprot->writeString((*_iter1697)); + xfer += oprot->writeString((*_iter1739)); } xfer += oprot->writeListEnd(); } @@ -34387,14 +34387,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1698; - ::apache::thrift::protocol::TType _etype1701; - xfer += iprot->readListBegin(_etype1701, _size1698); - (*(this->success)).resize(_size1698); - uint32_t _i1702; - for (_i1702 = 0; _i1702 < _size1698; ++_i1702) + uint32_t _size1740; + ::apache::thrift::protocol::TType _etype1743; + xfer += iprot->readListBegin(_etype1743, _size1740); + (*(this->success)).resize(_size1740); + uint32_t _i1744; + for (_i1744 = 0; _i1744 < _size1740; ++_i1744) { - xfer += iprot->readString((*(this->success))[_i1702]); + xfer += iprot->readString((*(this->success))[_i1744]); } xfer += iprot->readListEnd(); } @@ -35120,14 +35120,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1703; - ::apache::thrift::protocol::TType _etype1706; - xfer += iprot->readListBegin(_etype1706, _size1703); - this->success.resize(_size1703); - uint32_t _i1707; - for (_i1707 = 0; _i1707 < _size1703; ++_i1707) + uint32_t _size1745; + ::apache::thrift::protocol::TType _etype1748; + xfer += iprot->readListBegin(_etype1748, _size1745); + this->success.resize(_size1745); + uint32_t _i1749; + for (_i1749 = 0; _i1749 < _size1745; ++_i1749) { - xfer += iprot->readString(this->success[_i1707]); + xfer += 
iprot->readString(this->success[_i1749]); } xfer += iprot->readListEnd(); } @@ -35158,10 +35158,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1708; - for (_iter1708 = this->success.begin(); _iter1708 != this->success.end(); ++_iter1708) + std::vector ::const_iterator _iter1750; + for (_iter1750 = this->success.begin(); _iter1750 != this->success.end(); ++_iter1750) { - xfer += oprot->writeString((*_iter1708)); + xfer += oprot->writeString((*_iter1750)); } xfer += oprot->writeListEnd(); } @@ -35202,14 +35202,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1709; - ::apache::thrift::protocol::TType _etype1712; - xfer += iprot->readListBegin(_etype1712, _size1709); - (*(this->success)).resize(_size1709); - uint32_t _i1713; - for (_i1713 = 0; _i1713 < _size1709; ++_i1713) + uint32_t _size1751; + ::apache::thrift::protocol::TType _etype1754; + xfer += iprot->readListBegin(_etype1754, _size1751); + (*(this->success)).resize(_size1751); + uint32_t _i1755; + for (_i1755 = 0; _i1755 < _size1751; ++_i1755) { - xfer += iprot->readString((*(this->success))[_i1713]); + xfer += iprot->readString((*(this->success))[_i1755]); } xfer += iprot->readListEnd(); } @@ -36331,11 +36331,11 @@ uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { +ThriftHiveMetastore_get_open_write_ids_args::~ThriftHiveMetastore_get_open_write_ids_args() throw() { } -uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_get_open_write_ids_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36376,10 +36376,10 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc return xfer; } -uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_write_ids_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36391,14 +36391,14 @@ uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProto } -ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { +ThriftHiveMetastore_get_open_write_ids_pargs::~ThriftHiveMetastore_get_open_write_ids_pargs() throw() { } -uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_write_ids_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36410,11 +36410,11 @@ uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProt } -ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { 
+ThriftHiveMetastore_get_open_write_ids_result::~ThriftHiveMetastore_get_open_write_ids_result() throw() { } -uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_write_ids_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36471,11 +36471,11 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_write_ids_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -36496,11 +36496,11 @@ uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { +ThriftHiveMetastore_get_open_write_ids_presult::~ThriftHiveMetastore_get_open_write_ids_presult() throw() { } -uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_write_ids_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36558,11 +36558,11 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { +ThriftHiveMetastore_allocate_table_write_id_args::~ThriftHiveMetastore_allocate_table_write_id_args() throw() { } -uint32_t 
ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36603,10 +36603,10 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36618,14 +36618,14 @@ uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { +ThriftHiveMetastore_allocate_table_write_id_pargs::~ThriftHiveMetastore_allocate_table_write_id_pargs() throw() { } -uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36637,11 +36637,11 @@ uint32_t 
ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { +ThriftHiveMetastore_allocate_table_write_id_result::~ThriftHiveMetastore_allocate_table_write_id_result() throw() { } -uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36706,11 +36706,11 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -36735,11 +36735,11 @@ uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { +ThriftHiveMetastore_allocate_table_write_id_presult::~ThriftHiveMetastore_allocate_table_write_id_presult() throw() { } -uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36805,11 +36805,11 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol } 
-ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { +ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { } -uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36850,10 +36850,10 @@ uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36865,14 +36865,14 @@ uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { +ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36884,11 +36884,11 @@ uint32_t 
ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { +ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { } -uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36909,6 +36909,14 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -36937,13 +36945,17 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); - if (this->__isset.o1) { + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -36958,11 +36970,11 @@ uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() { 
+ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36983,6 +36995,14 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -37012,11 +37032,11 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP } -ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { +ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { } -uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37057,10 +37077,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += 
this->rqst.write(oprot); @@ -37072,14 +37092,14 @@ uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { +ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -37091,11 +37111,11 @@ uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { +ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { } -uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37124,6 +37144,30 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -37136,16 +37180,28 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -37153,11 +37209,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { +ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* 
iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37186,6 +37242,30 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -37199,11 +37279,11 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { +ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37226,8 +37306,8 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ids.read(iprot); - this->__isset.ids = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -37244,13 +37324,13 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t 
ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->ids.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37259,17 +37339,17 @@ uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { +ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->ids)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37278,11 +37358,11 @@ uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { +ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { } -uint32_t 
ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37319,14 +37399,6 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -37339,11 +37411,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -37353,10 +37425,6 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -37364,11 +37432,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { 
+ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37405,14 +37473,6 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -37426,11 +37486,11 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { +ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37453,8 +37513,8 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->txns.read(iprot); - this->__isset.txns = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -37471,13 +37531,13 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* 
oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->txns.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37486,17 +37546,17 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::p } -ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { +ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->txns)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37505,11 +37565,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift:: } -ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { +ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { } -uint32_t 
ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37550,11 +37610,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -37567,11 +37627,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift: } -ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { +ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37613,11 +37673,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift: } -ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { +ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { } -uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37640,8 +37700,8 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->ids.read(iprot); + this->__isset.ids = true; } else { xfer += iprot->skip(ftype); } @@ -37658,13 +37718,13 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro return xfer; } -uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ids.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37673,17 +37733,17 @@ uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { +ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { } -uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->ids)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37692,11 +37752,11 @@ uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { +ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { } -uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37715,7 +37775,36 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -37724,23 +37813,36 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP return xfer; } -uint32_t 
ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { +ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { } -uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37759,7 +37861,36 @@ uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += 
iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -37769,11 +37900,11 @@ uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T } -ThriftHiveMetastore_compact2_args::~ThriftHiveMetastore_compact2_args() throw() { +ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { } -uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37796,8 +37927,8 @@ uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TPr { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->txns.read(iprot); + this->__isset.txns = true; } else { xfer += iprot->skip(ftype); } @@ -37814,13 +37945,13 @@ uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_compact2_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("txns", 
::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->txns.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37829,17 +37960,17 @@ uint32_t ThriftHiveMetastore_compact2_args::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_compact2_pargs::~ThriftHiveMetastore_compact2_pargs() throw() { +ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { } -uint32_t ThriftHiveMetastore_compact2_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->txns)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37848,11 +37979,11 @@ uint32_t ThriftHiveMetastore_compact2_pargs::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_compact2_result::~ThriftHiveMetastore_compact2_result() throw() { +ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { } -uint32_t ThriftHiveMetastore_compact2_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37893,11 +38024,11 @@ uint32_t ThriftHiveMetastore_compact2_result::read(::apache::thrift::protocol::T return xfer; } -uint32_t 
ThriftHiveMetastore_compact2_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -37910,11 +38041,11 @@ uint32_t ThriftHiveMetastore_compact2_result::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_compact2_presult::~ThriftHiveMetastore_compact2_presult() throw() { +ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { } -uint32_t ThriftHiveMetastore_compact2_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37956,11 +38087,11 @@ uint32_t ThriftHiveMetastore_compact2_presult::read(::apache::thrift::protocol:: } -ThriftHiveMetastore_show_compact_args::~ThriftHiveMetastore_show_compact_args() throw() { +ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { } -uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38001,10 +38132,10 @@ uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t 
ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -38016,14 +38147,14 @@ uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_show_compact_pargs::~ThriftHiveMetastore_show_compact_pargs() throw() { +ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { } -uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -38035,11 +38166,11 @@ uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_show_compact_result::~ThriftHiveMetastore_show_compact_result() throw() { +ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { } -uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38058,20 +38189,7 @@ uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protoco if (ftype == 
::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -38080,28 +38198,23 @@ uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_show_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_show_compact_presult::~ThriftHiveMetastore_show_compact_presult() throw() { +ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { } -uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38120,20 +38233,7 @@ uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } 
- break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -38143,11 +38243,11 @@ uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_add_dynamic_partitions_args::~ThriftHiveMetastore_add_dynamic_partitions_args() throw() { +ThriftHiveMetastore_compact2_args::~ThriftHiveMetastore_compact2_args() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38188,10 +38288,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact2_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -38203,14 +38303,14 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift } -ThriftHiveMetastore_add_dynamic_partitions_pargs::~ThriftHiveMetastore_add_dynamic_partitions_pargs() throw() { +ThriftHiveMetastore_compact2_pargs::~ThriftHiveMetastore_compact2_pargs() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact2_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; 
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -38222,11 +38322,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrif } -ThriftHiveMetastore_add_dynamic_partitions_result::~ThriftHiveMetastore_add_dynamic_partitions_result() throw() { +ThriftHiveMetastore_compact2_result::~ThriftHiveMetastore_compact2_result() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact2_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38247,18 +38347,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrif } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -38275,19 +38367,15 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact2_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_result"); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_compact2_result"); - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -38296,11 +38384,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thri } -ThriftHiveMetastore_add_dynamic_partitions_presult::~ThriftHiveMetastore_add_dynamic_partitions_presult() throw() { +ThriftHiveMetastore_compact2_presult::~ThriftHiveMetastore_compact2_presult() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact2_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38321,18 +38409,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thri } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -38350,11 +38430,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thri } -ThriftHiveMetastore_get_next_notification_args::~ThriftHiveMetastore_get_next_notification_args() throw() { 
+ThriftHiveMetastore_show_compact_args::~ThriftHiveMetastore_show_compact_args() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38395,10 +38475,10 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -38410,14 +38490,14 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift: } -ThriftHiveMetastore_get_next_notification_pargs::~ThriftHiveMetastore_get_next_notification_pargs() throw() { +ThriftHiveMetastore_show_compact_pargs::~ThriftHiveMetastore_show_compact_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += 
(*(this->rqst)).write(oprot); @@ -38429,11 +38509,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift } -ThriftHiveMetastore_get_next_notification_result::~ThriftHiveMetastore_get_next_notification_result() throw() { +ThriftHiveMetastore_show_compact_result::~ThriftHiveMetastore_show_compact_result() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38474,11 +38554,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift return xfer; } -uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -38491,11 +38571,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrif } -ThriftHiveMetastore_get_next_notification_presult::~ThriftHiveMetastore_get_next_notification_presult() throw() { +ThriftHiveMetastore_show_compact_presult::~ThriftHiveMetastore_show_compact_presult() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38537,11 +38617,11 @@ uint32_t 
ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrif } -ThriftHiveMetastore_get_current_notificationEventId_args::~ThriftHiveMetastore_get_current_notificationEventId_args() throw() { +ThriftHiveMetastore_add_dynamic_partitions_args::~ThriftHiveMetastore_add_dynamic_partitions_args() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38560,7 +38640,20 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -38569,10 +38662,14 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache return xfer; } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -38580,14 +38677,18 @@ 
uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apach } -ThriftHiveMetastore_get_current_notificationEventId_pargs::~ThriftHiveMetastore_get_current_notificationEventId_pargs() throw() { +ThriftHiveMetastore_add_dynamic_partitions_pargs::~ThriftHiveMetastore_add_dynamic_partitions_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_pargs"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -38595,11 +38696,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apac } -ThriftHiveMetastore_get_current_notificationEventId_result::~ThriftHiveMetastore_get_current_notificationEventId_result() throw() { +ThriftHiveMetastore_add_dynamic_partitions_result::~ThriftHiveMetastore_add_dynamic_partitions_result() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38620,10 +38721,18 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apac } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - 
this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -38640,15 +38749,19 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apac return xfer; } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -38657,11 +38770,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apa } -ThriftHiveMetastore_get_current_notificationEventId_presult::~ThriftHiveMetastore_get_current_notificationEventId_presult() throw() { +ThriftHiveMetastore_add_dynamic_partitions_presult::~ThriftHiveMetastore_add_dynamic_partitions_presult() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38682,10 +38795,18 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apa } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -38703,11 +38824,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apa } -ThriftHiveMetastore_get_notification_events_count_args::~ThriftHiveMetastore_get_notification_events_count_args() throw() { +ThriftHiveMetastore_get_next_notification_args::~ThriftHiveMetastore_get_next_notification_args() throw() { } -uint32_t ThriftHiveMetastore_get_notification_events_count_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38748,10 +38869,10 @@ uint32_t ThriftHiveMetastore_get_notification_events_count_args::read(::apache:: return xfer; } -uint32_t ThriftHiveMetastore_get_notification_events_count_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_notification_events_count_args"); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -38763,14 +38884,14 @@ uint32_t ThriftHiveMetastore_get_notification_events_count_args::write(::apache: } -ThriftHiveMetastore_get_notification_events_count_pargs::~ThriftHiveMetastore_get_notification_events_count_pargs() throw() { +ThriftHiveMetastore_get_next_notification_pargs::~ThriftHiveMetastore_get_next_notification_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_notification_events_count_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_notification_events_count_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -38782,11 +38903,364 @@ uint32_t ThriftHiveMetastore_get_notification_events_count_pargs::write(::apache } -ThriftHiveMetastore_get_notification_events_count_result::~ThriftHiveMetastore_get_notification_events_count_result() throw() { +ThriftHiveMetastore_get_next_notification_result::~ThriftHiveMetastore_get_next_notification_result() throw() { } -uint32_t ThriftHiveMetastore_get_notification_events_count_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using 
::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_next_notification_presult::~ThriftHiveMetastore_get_next_notification_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += 
iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_current_notificationEventId_args::~ThriftHiveMetastore_get_current_notificationEventId_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_current_notificationEventId_pargs::~ThriftHiveMetastore_get_current_notificationEventId_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_current_notificationEventId_result::~ThriftHiveMetastore_get_current_notificationEventId_result() throw() { +} + + +uint32_t 
ThriftHiveMetastore_get_current_notificationEventId_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_current_notificationEventId_presult::~ThriftHiveMetastore_get_current_notificationEventId_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += 
iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_notification_events_count_args::~ThriftHiveMetastore_get_notification_events_count_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_notification_events_count_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_notification_events_count_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_notification_events_count_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += 
oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_notification_events_count_pargs::~ThriftHiveMetastore_get_notification_events_count_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_notification_events_count_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_notification_events_count_pargs"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_notification_events_count_result::~ThriftHiveMetastore_get_notification_events_count_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_notification_events_count_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -53776,6 +54250,137 @@ void ThriftHiveMetastoreClient::recv_commit_txn() return; } +void ThriftHiveMetastoreClient::get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) +{ + send_get_open_write_ids(rqst); + recv_get_open_write_ids(_return); +} + +void ThriftHiveMetastoreClient::send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_open_write_ids_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_open_write_ids(GetOpenWriteIdsResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + 
iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_open_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_open_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_write_ids failed: unknown result"); +} + +void ThriftHiveMetastoreClient::allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) +{ + send_allocate_table_write_id(rqst); + recv_allocate_table_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_allocate_table_write_id_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + 
iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("allocate_table_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_allocate_table_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "allocate_table_write_id failed: unknown result"); +} + void ThriftHiveMetastoreClient::lock(LockResponse& _return, const LockRequest& rqst) { send_lock(rqst); @@ -64681,6 +65286,129 @@ void ThriftHiveMetastoreProcessor::process_commit_txn(int32_t seqid, ::apache::t } } +void ThriftHiveMetastoreProcessor::process_get_open_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_write_ids", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_write_ids"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.get_open_write_ids"); + } + + ThriftHiveMetastore_get_open_write_ids_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_write_ids", bytes); + } + + ThriftHiveMetastore_get_open_write_ids_result result; + try { + iface_->get_open_write_ids(result.success, args.rqst); + result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_write_ids"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_write_ids"); + } + + oprot->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_write_ids", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_allocate_table_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.allocate_table_write_id", callContext); + } + 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.allocate_table_write_id"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + ThriftHiveMetastore_allocate_table_write_id_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.allocate_table_write_id", bytes); + } + + ThriftHiveMetastore_allocate_table_write_id_result result; + try { + iface_->allocate_table_write_id(result.success, args.rqst); + result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + oprot->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.allocate_table_write_id", bytes); + } +} + void 
ThriftHiveMetastoreProcessor::process_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -79994,6 +80722,194 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) +{ + int32_t seqid = send_get_open_write_ids(rqst); + recv_get_open_write_ids(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_open_write_ids_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_open_write_ids(GetOpenWriteIdsResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_open_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_open_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_write_ids failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) +{ + int32_t seqid = send_allocate_table_write_id(rqst); + recv_allocate_table_write_id(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_allocate_table_write_id_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + 
oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("allocate_table_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_allocate_table_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "allocate_table_write_id failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) { int32_t seqid = send_lock(rqst); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index bfa17eb..7d88aab 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -164,6 +164,8 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void abort_txn(const AbortTxnRequest& rqst) = 0; virtual void abort_txns(const AbortTxnsRequest& rqst) = 0; virtual void commit_txn(const CommitTxnRequest& rqst) = 0; + virtual void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) = 0; + virtual void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) = 0; virtual void lock(LockResponse& _return, const LockRequest& rqst) = 0; virtual void check_lock(LockResponse& _return, const CheckLockRequest& rqst) = 0; virtual void unlock(const UnlockRequest& rqst) = 0; @@ -686,6 +688,12 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void commit_txn(const CommitTxnRequest& /* rqst */) { return; } + void get_open_write_ids(GetOpenWriteIdsResponse& /* _return */, const GetOpenWriteIdsRequest& /* rqst */) { + return; + } + void allocate_table_write_id(AllocateTableWriteIdResponse& /* _return */, const AllocateTableWriteIdRequest& /* rqst */) { + return; + } void lock(LockResponse& /* 
_return */, const LockRequest& /* rqst */) { return; } @@ -18696,6 +18704,254 @@ class ThriftHiveMetastore_commit_txn_presult { }; +typedef struct _ThriftHiveMetastore_get_open_write_ids_args__isset { + _ThriftHiveMetastore_get_open_write_ids_args__isset() : rqst(false) {} + bool rqst :1; +} _ThriftHiveMetastore_get_open_write_ids_args__isset; + +class ThriftHiveMetastore_get_open_write_ids_args { + public: + + ThriftHiveMetastore_get_open_write_ids_args(const ThriftHiveMetastore_get_open_write_ids_args&); + ThriftHiveMetastore_get_open_write_ids_args& operator=(const ThriftHiveMetastore_get_open_write_ids_args&); + ThriftHiveMetastore_get_open_write_ids_args() { + } + + virtual ~ThriftHiveMetastore_get_open_write_ids_args() throw(); + GetOpenWriteIdsRequest rqst; + + _ThriftHiveMetastore_get_open_write_ids_args__isset __isset; + + void __set_rqst(const GetOpenWriteIdsRequest& val); + + bool operator == (const ThriftHiveMetastore_get_open_write_ids_args & rhs) const + { + if (!(rqst == rhs.rqst)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_open_write_ids_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_open_write_ids_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_open_write_ids_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_open_write_ids_pargs() throw(); + const GetOpenWriteIdsRequest* rqst; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_open_write_ids_result__isset { + _ThriftHiveMetastore_get_open_write_ids_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_open_write_ids_result__isset; + +class ThriftHiveMetastore_get_open_write_ids_result { + public: + + 
ThriftHiveMetastore_get_open_write_ids_result(const ThriftHiveMetastore_get_open_write_ids_result&); + ThriftHiveMetastore_get_open_write_ids_result& operator=(const ThriftHiveMetastore_get_open_write_ids_result&); + ThriftHiveMetastore_get_open_write_ids_result() { + } + + virtual ~ThriftHiveMetastore_get_open_write_ids_result() throw(); + GetOpenWriteIdsResponse success; + NoSuchTxnException o1; + MetaException o2; + + _ThriftHiveMetastore_get_open_write_ids_result__isset __isset; + + void __set_success(const GetOpenWriteIdsResponse& val); + + void __set_o1(const NoSuchTxnException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_open_write_ids_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_open_write_ids_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_open_write_ids_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_open_write_ids_presult__isset { + _ThriftHiveMetastore_get_open_write_ids_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_open_write_ids_presult__isset; + +class ThriftHiveMetastore_get_open_write_ids_presult { + public: + + + virtual ~ThriftHiveMetastore_get_open_write_ids_presult() throw(); + GetOpenWriteIdsResponse* success; + NoSuchTxnException o1; + MetaException o2; + + _ThriftHiveMetastore_get_open_write_ids_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_args__isset { + 
_ThriftHiveMetastore_allocate_table_write_id_args__isset() : rqst(false) {} + bool rqst :1; +} _ThriftHiveMetastore_allocate_table_write_id_args__isset; + +class ThriftHiveMetastore_allocate_table_write_id_args { + public: + + ThriftHiveMetastore_allocate_table_write_id_args(const ThriftHiveMetastore_allocate_table_write_id_args&); + ThriftHiveMetastore_allocate_table_write_id_args& operator=(const ThriftHiveMetastore_allocate_table_write_id_args&); + ThriftHiveMetastore_allocate_table_write_id_args() { + } + + virtual ~ThriftHiveMetastore_allocate_table_write_id_args() throw(); + AllocateTableWriteIdRequest rqst; + + _ThriftHiveMetastore_allocate_table_write_id_args__isset __isset; + + void __set_rqst(const AllocateTableWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_allocate_table_write_id_args & rhs) const + { + if (!(rqst == rhs.rqst)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_allocate_table_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_allocate_table_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_allocate_table_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_allocate_table_write_id_pargs() throw(); + const AllocateTableWriteIdRequest* rqst; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_result__isset { + _ThriftHiveMetastore_allocate_table_write_id_result__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_allocate_table_write_id_result__isset; + +class ThriftHiveMetastore_allocate_table_write_id_result { + public: + + ThriftHiveMetastore_allocate_table_write_id_result(const 
ThriftHiveMetastore_allocate_table_write_id_result&); + ThriftHiveMetastore_allocate_table_write_id_result& operator=(const ThriftHiveMetastore_allocate_table_write_id_result&); + ThriftHiveMetastore_allocate_table_write_id_result() { + } + + virtual ~ThriftHiveMetastore_allocate_table_write_id_result() throw(); + AllocateTableWriteIdResponse success; + NoSuchTxnException o1; + TxnAbortedException o2; + MetaException o3; + + _ThriftHiveMetastore_allocate_table_write_id_result__isset __isset; + + void __set_success(const AllocateTableWriteIdResponse& val); + + void __set_o1(const NoSuchTxnException& val); + + void __set_o2(const TxnAbortedException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_allocate_table_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_allocate_table_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_allocate_table_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_presult__isset { + _ThriftHiveMetastore_allocate_table_write_id_presult__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_allocate_table_write_id_presult__isset; + +class ThriftHiveMetastore_allocate_table_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_allocate_table_write_id_presult() throw(); + AllocateTableWriteIdResponse* success; + NoSuchTxnException o1; + TxnAbortedException o2; + MetaException o3; + + _ThriftHiveMetastore_allocate_table_write_id_presult__isset __isset; + 
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_lock_args__isset { _ThriftHiveMetastore_lock_args__isset() : rqst(false) {} bool rqst :1; @@ -23597,6 +23853,12 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void commit_txn(const CommitTxnRequest& rqst); void send_commit_txn(const CommitTxnRequest& rqst); void recv_commit_txn(); + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst); + void send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst); + void recv_get_open_write_ids(GetOpenWriteIdsResponse& _return); + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst); + void send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst); + void recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return); void lock(LockResponse& _return, const LockRequest& rqst); void send_lock(const LockRequest& rqst); void recv_lock(LockResponse& _return); @@ -23866,6 +24128,8 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_abort_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_abort_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_commit_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_open_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_allocate_table_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_lock(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_check_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_unlock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -24051,6 +24315,8 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["abort_txn"] = &ThriftHiveMetastoreProcessor::process_abort_txn; processMap_["abort_txns"] = &ThriftHiveMetastoreProcessor::process_abort_txns; processMap_["commit_txn"] = &ThriftHiveMetastoreProcessor::process_commit_txn; + processMap_["get_open_write_ids"] = &ThriftHiveMetastoreProcessor::process_get_open_write_ids; + processMap_["allocate_table_write_id"] = &ThriftHiveMetastoreProcessor::process_allocate_table_write_id; processMap_["lock"] = &ThriftHiveMetastoreProcessor::process_lock; processMap_["check_lock"] = &ThriftHiveMetastoreProcessor::process_check_lock; processMap_["unlock"] = &ThriftHiveMetastoreProcessor::process_unlock; @@ -25482,6 +25748,26 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->commit_txn(rqst); } + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_open_write_ids(_return, rqst); + } + ifaces_[i]->get_open_write_ids(_return, rqst); + return; + } + + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->allocate_table_write_id(_return, rqst); + } + ifaces_[i]->allocate_table_write_id(_return, rqst); + return; + } + void lock(LockResponse& _return, const LockRequest& rqst) { size_t sz = 
ifaces_.size(); size_t i = 0; @@ -26309,6 +26595,12 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void commit_txn(const CommitTxnRequest& rqst); int32_t send_commit_txn(const CommitTxnRequest& rqst); void recv_commit_txn(const int32_t seqid); + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst); + int32_t send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst); + void recv_get_open_write_ids(GetOpenWriteIdsResponse& _return, const int32_t seqid); + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst); + int32_t send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst); + void recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return, const int32_t seqid); void lock(LockResponse& _return, const LockRequest& rqst); int32_t send_lock(const LockRequest& rqst); void recv_lock(LockResponse& _return, const int32_t seqid); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index cf9a171..002713c 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -732,6 +732,16 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("commit_txn\n"); } + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) { + // Your implementation goes here + printf("get_open_write_ids\n"); + } + + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) { + // Your implementation goes here + printf("allocate_table_write_id\n"); + } + void lock(LockResponse& _return, const LockRequest& rqst) { // Your implementation goes here printf("lock\n"); diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index aadf8f1..9875c33 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -14308,6 +14308,808 @@ void CommitTxnRequest::printTo(std::ostream& out) const { } +GetOpenWriteIdsRequest::~GetOpenWriteIdsRequest() throw() { +} + + +void GetOpenWriteIdsRequest::__set_tableNames(const std::vector & val) { + this->tableNames = val; +} + +void GetOpenWriteIdsRequest::__set_validTxnStr(const std::string& val) { + this->validTxnStr = val; +} + +uint32_t GetOpenWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_tableNames = false; + bool isset_validTxnStr = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->tableNames.clear(); + uint32_t _size617; + ::apache::thrift::protocol::TType _etype620; + xfer += iprot->readListBegin(_etype620, _size617); + this->tableNames.resize(_size617); + uint32_t _i621; + for (_i621 = 0; _i621 < _size617; ++_i621) + { + xfer += iprot->readString(this->tableNames[_i621]); + } + xfer += iprot->readListEnd(); + } + isset_tableNames = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->validTxnStr); + isset_validTxnStr = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += 
iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_tableNames) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_validTxnStr) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetOpenWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetOpenWriteIdsRequest"); + + xfer += oprot->writeFieldBegin("tableNames", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tableNames.size())); + std::vector ::const_iterator _iter622; + for (_iter622 = this->tableNames.begin(); _iter622 != this->tableNames.end(); ++_iter622) + { + xfer += oprot->writeString((*_iter622)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("validTxnStr", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->validTxnStr); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetOpenWriteIdsRequest &a, GetOpenWriteIdsRequest &b) { + using ::std::swap; + swap(a.tableNames, b.tableNames); + swap(a.validTxnStr, b.validTxnStr); +} + +GetOpenWriteIdsRequest::GetOpenWriteIdsRequest(const GetOpenWriteIdsRequest& other623) { + tableNames = other623.tableNames; + validTxnStr = other623.validTxnStr; +} +GetOpenWriteIdsRequest& GetOpenWriteIdsRequest::operator=(const GetOpenWriteIdsRequest& other624) { + tableNames = other624.tableNames; + validTxnStr = other624.validTxnStr; + return *this; +} +void GetOpenWriteIdsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetOpenWriteIdsRequest("; + out << "tableNames=" << to_string(tableNames); + out << ", 
" << "validTxnStr=" << to_string(validTxnStr); + out << ")"; +} + + +OpenWriteIds::~OpenWriteIds() throw() { +} + + +void OpenWriteIds::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void OpenWriteIds::__set_writeIdHighWaterMark(const int64_t val) { + this->writeIdHighWaterMark = val; +} + +void OpenWriteIds::__set_openWriteIds(const std::vector & val) { + this->openWriteIds = val; +} + +void OpenWriteIds::__set_minWriteId(const int64_t val) { + this->minWriteId = val; +__isset.minWriteId = true; +} + +void OpenWriteIds::__set_abortedBits(const std::string& val) { + this->abortedBits = val; +} + +uint32_t OpenWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_tableName = false; + bool isset_writeIdHighWaterMark = false; + bool isset_openWriteIds = false; + bool isset_abortedBits = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeIdHighWaterMark); + isset_writeIdHighWaterMark = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->openWriteIds.clear(); + uint32_t _size625; + ::apache::thrift::protocol::TType _etype628; + xfer += iprot->readListBegin(_etype628, _size625); + this->openWriteIds.resize(_size625); + uint32_t _i629; + for (_i629 = 0; _i629 < _size625; ++_i629) 
+ { + xfer += iprot->readI64(this->openWriteIds[_i629]); + } + xfer += iprot->readListEnd(); + } + isset_openWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->minWriteId); + this->__isset.minWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readBinary(this->abortedBits); + isset_abortedBits = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeIdHighWaterMark) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_openWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_abortedBits) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t OpenWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("OpenWriteIds"); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeIdHighWaterMark", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->writeIdHighWaterMark); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("openWriteIds", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->openWriteIds.size())); + std::vector ::const_iterator _iter630; + for (_iter630 = this->openWriteIds.begin(); _iter630 != this->openWriteIds.end(); 
++_iter630) + { + xfer += oprot->writeI64((*_iter630)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + if (this->__isset.minWriteId) { + xfer += oprot->writeFieldBegin("minWriteId", ::apache::thrift::protocol::T_I64, 4); + xfer += oprot->writeI64(this->minWriteId); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldBegin("abortedBits", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeBinary(this->abortedBits); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(OpenWriteIds &a, OpenWriteIds &b) { + using ::std::swap; + swap(a.tableName, b.tableName); + swap(a.writeIdHighWaterMark, b.writeIdHighWaterMark); + swap(a.openWriteIds, b.openWriteIds); + swap(a.minWriteId, b.minWriteId); + swap(a.abortedBits, b.abortedBits); + swap(a.__isset, b.__isset); +} + +OpenWriteIds::OpenWriteIds(const OpenWriteIds& other631) { + tableName = other631.tableName; + writeIdHighWaterMark = other631.writeIdHighWaterMark; + openWriteIds = other631.openWriteIds; + minWriteId = other631.minWriteId; + abortedBits = other631.abortedBits; + __isset = other631.__isset; +} +OpenWriteIds& OpenWriteIds::operator=(const OpenWriteIds& other632) { + tableName = other632.tableName; + writeIdHighWaterMark = other632.writeIdHighWaterMark; + openWriteIds = other632.openWriteIds; + minWriteId = other632.minWriteId; + abortedBits = other632.abortedBits; + __isset = other632.__isset; + return *this; +} +void OpenWriteIds::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "OpenWriteIds("; + out << "tableName=" << to_string(tableName); + out << ", " << "writeIdHighWaterMark=" << to_string(writeIdHighWaterMark); + out << ", " << "openWriteIds=" << to_string(openWriteIds); + out << ", " << "minWriteId="; (__isset.minWriteId ? 
(out << to_string(minWriteId)) : (out << "")); + out << ", " << "abortedBits=" << to_string(abortedBits); + out << ")"; +} + + +GetOpenWriteIdsResponse::~GetOpenWriteIdsResponse() throw() { +} + + +void GetOpenWriteIdsResponse::__set_openWriteIds(const std::vector & val) { + this->openWriteIds = val; +} + +uint32_t GetOpenWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_openWriteIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->openWriteIds.clear(); + uint32_t _size633; + ::apache::thrift::protocol::TType _etype636; + xfer += iprot->readListBegin(_etype636, _size633); + this->openWriteIds.resize(_size633); + uint32_t _i637; + for (_i637 = 0; _i637 < _size633; ++_i637) + { + xfer += this->openWriteIds[_i637].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_openWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_openWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetOpenWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetOpenWriteIdsResponse"); + + xfer += oprot->writeFieldBegin("openWriteIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->openWriteIds.size())); + std::vector ::const_iterator _iter638; + for (_iter638 = this->openWriteIds.begin(); _iter638 != this->openWriteIds.end(); ++_iter638) + { + xfer += (*_iter638).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetOpenWriteIdsResponse &a, GetOpenWriteIdsResponse &b) { + using ::std::swap; + swap(a.openWriteIds, b.openWriteIds); +} + +GetOpenWriteIdsResponse::GetOpenWriteIdsResponse(const GetOpenWriteIdsResponse& other639) { + openWriteIds = other639.openWriteIds; +} +GetOpenWriteIdsResponse& GetOpenWriteIdsResponse::operator=(const GetOpenWriteIdsResponse& other640) { + openWriteIds = other640.openWriteIds; + return *this; +} +void GetOpenWriteIdsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetOpenWriteIdsResponse("; + out << "openWriteIds=" << to_string(openWriteIds); + out << ")"; +} + + +AllocateTableWriteIdRequest::~AllocateTableWriteIdRequest() throw() { +} + + +void AllocateTableWriteIdRequest::__set_txnIds(const std::vector & val) { + this->txnIds = val; +} + +void AllocateTableWriteIdRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void AllocateTableWriteIdRequest::__set_tableName(const std::string& val) { + this->tableName = val; +} + +uint32_t AllocateTableWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnIds = false; + bool isset_dbName = false; + bool isset_tableName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, 
ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->txnIds.clear(); + uint32_t _size641; + ::apache::thrift::protocol::TType _etype644; + xfer += iprot->readListBegin(_etype644, _size641); + this->txnIds.resize(_size641); + uint32_t _i645; + for (_i645 = 0; _i645 < _size641; ++_i645) + { + xfer += iprot->readI64(this->txnIds[_i645]); + } + xfer += iprot->readListEnd(); + } + isset_txnIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AllocateTableWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AllocateTableWriteIdRequest"); + + xfer += oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); + std::vector ::const_iterator _iter646; + for (_iter646 = this->txnIds.begin(); _iter646 != this->txnIds.end(); ++_iter646) + { + xfer += oprot->writeI64((*_iter646)); + } + xfer += 
oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AllocateTableWriteIdRequest &a, AllocateTableWriteIdRequest &b) { + using ::std::swap; + swap(a.txnIds, b.txnIds); + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); +} + +AllocateTableWriteIdRequest::AllocateTableWriteIdRequest(const AllocateTableWriteIdRequest& other647) { + txnIds = other647.txnIds; + dbName = other647.dbName; + tableName = other647.tableName; +} +AllocateTableWriteIdRequest& AllocateTableWriteIdRequest::operator=(const AllocateTableWriteIdRequest& other648) { + txnIds = other648.txnIds; + dbName = other648.dbName; + tableName = other648.tableName; + return *this; +} +void AllocateTableWriteIdRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AllocateTableWriteIdRequest("; + out << "txnIds=" << to_string(txnIds); + out << ", " << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ")"; +} + + +TxnToWriteId::~TxnToWriteId() throw() { +} + + +void TxnToWriteId::__set_txnId(const int64_t val) { + this->txnId = val; +} + +void TxnToWriteId::__set_writeId(const int64_t val) { + this->writeId = val; +} + +uint32_t TxnToWriteId::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnId = 
false; + bool isset_writeId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + isset_txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TxnToWriteId::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TxnToWriteId"); + + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TxnToWriteId &a, TxnToWriteId &b) { + using ::std::swap; + swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); +} + +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other649) { + txnId = other649.txnId; + writeId = other649.writeId; +} +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other650) { + txnId = other650.txnId; + writeId = other650.writeId; + return *this; +} +void TxnToWriteId::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << 
"TxnToWriteId("; + out << "txnId=" << to_string(txnId); + out << ", " << "writeId=" << to_string(writeId); + out << ")"; +} + + +AllocateTableWriteIdResponse::~AllocateTableWriteIdResponse() throw() { +} + + +void AllocateTableWriteIdResponse::__set_txnToWriteIds(const std::vector & val) { + this->txnToWriteIds = val; +} + +uint32_t AllocateTableWriteIdResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnToWriteIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->txnToWriteIds.clear(); + uint32_t _size651; + ::apache::thrift::protocol::TType _etype654; + xfer += iprot->readListBegin(_etype654, _size651); + this->txnToWriteIds.resize(_size651); + uint32_t _i655; + for (_i655 = 0; _i655 < _size651; ++_i655) + { + xfer += this->txnToWriteIds[_i655].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_txnToWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnToWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AllocateTableWriteIdResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AllocateTableWriteIdResponse"); + + xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); + std::vector ::const_iterator _iter656; + for (_iter656 = this->txnToWriteIds.begin(); _iter656 != this->txnToWriteIds.end(); ++_iter656) + { + xfer += (*_iter656).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AllocateTableWriteIdResponse &a, AllocateTableWriteIdResponse &b) { + using ::std::swap; + swap(a.txnToWriteIds, b.txnToWriteIds); +} + +AllocateTableWriteIdResponse::AllocateTableWriteIdResponse(const AllocateTableWriteIdResponse& other657) { + txnToWriteIds = other657.txnToWriteIds; +} +AllocateTableWriteIdResponse& AllocateTableWriteIdResponse::operator=(const AllocateTableWriteIdResponse& other658) { + txnToWriteIds = other658.txnToWriteIds; + return *this; +} +void AllocateTableWriteIdResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AllocateTableWriteIdResponse("; + out << "txnToWriteIds=" << to_string(txnToWriteIds); + out << ")"; +} + + LockComponent::~LockComponent() throw() { } @@ -14375,9 +15177,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast617; - xfer += iprot->readI32(ecast617); - this->type = (LockType::type)ecast617; + int32_t ecast659; + xfer += iprot->readI32(ecast659); + this->type = (LockType::type)ecast659; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -14385,9 +15187,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast618; - xfer += iprot->readI32(ecast618); - this->level = (LockLevel::type)ecast618; + int32_t ecast660; + xfer += iprot->readI32(ecast660); + this->level = (LockLevel::type)ecast660; isset_level = true; } else 
{ xfer += iprot->skip(ftype); @@ -14419,9 +15221,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast619; - xfer += iprot->readI32(ecast619); - this->operationType = (DataOperationType::type)ecast619; + int32_t ecast661; + xfer += iprot->readI32(ecast661); + this->operationType = (DataOperationType::type)ecast661; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -14521,27 +15323,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other620) { - type = other620.type; - level = other620.level; - dbname = other620.dbname; - tablename = other620.tablename; - partitionname = other620.partitionname; - operationType = other620.operationType; - isAcid = other620.isAcid; - isDynamicPartitionWrite = other620.isDynamicPartitionWrite; - __isset = other620.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other621) { - type = other621.type; - level = other621.level; - dbname = other621.dbname; - tablename = other621.tablename; - partitionname = other621.partitionname; - operationType = other621.operationType; - isAcid = other621.isAcid; - isDynamicPartitionWrite = other621.isDynamicPartitionWrite; - __isset = other621.__isset; +LockComponent::LockComponent(const LockComponent& other662) { + type = other662.type; + level = other662.level; + dbname = other662.dbname; + tablename = other662.tablename; + partitionname = other662.partitionname; + operationType = other662.operationType; + isAcid = other662.isAcid; + isDynamicPartitionWrite = other662.isDynamicPartitionWrite; + __isset = other662.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other663) { + type = other663.type; + level = other663.level; + dbname = other663.dbname; + tablename = other663.tablename; + partitionname = other663.partitionname; + 
operationType = other663.operationType; + isAcid = other663.isAcid; + isDynamicPartitionWrite = other663.isDynamicPartitionWrite; + __isset = other663.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -14613,14 +15415,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size622; - ::apache::thrift::protocol::TType _etype625; - xfer += iprot->readListBegin(_etype625, _size622); - this->component.resize(_size622); - uint32_t _i626; - for (_i626 = 0; _i626 < _size622; ++_i626) + uint32_t _size664; + ::apache::thrift::protocol::TType _etype667; + xfer += iprot->readListBegin(_etype667, _size664); + this->component.resize(_size664); + uint32_t _i668; + for (_i668 = 0; _i668 < _size664; ++_i668) { - xfer += this->component[_i626].read(iprot); + xfer += this->component[_i668].read(iprot); } xfer += iprot->readListEnd(); } @@ -14687,10 +15489,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter627; - for (_iter627 = this->component.begin(); _iter627 != this->component.end(); ++_iter627) + std::vector ::const_iterator _iter669; + for (_iter669 = this->component.begin(); _iter669 != this->component.end(); ++_iter669) { - xfer += (*_iter627).write(oprot); + xfer += (*_iter669).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14729,21 +15531,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other628) { - component = other628.component; - txnid = other628.txnid; - user = other628.user; - hostname = other628.hostname; - agentInfo = other628.agentInfo; - __isset = other628.__isset; -} 
-LockRequest& LockRequest::operator=(const LockRequest& other629) { - component = other629.component; - txnid = other629.txnid; - user = other629.user; - hostname = other629.hostname; - agentInfo = other629.agentInfo; - __isset = other629.__isset; +LockRequest::LockRequest(const LockRequest& other670) { + component = other670.component; + txnid = other670.txnid; + user = other670.user; + hostname = other670.hostname; + agentInfo = other670.agentInfo; + __isset = other670.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other671) { + component = other671.component; + txnid = other671.txnid; + user = other671.user; + hostname = other671.hostname; + agentInfo = other671.agentInfo; + __isset = other671.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -14803,9 +15605,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast630; - xfer += iprot->readI32(ecast630); - this->state = (LockState::type)ecast630; + int32_t ecast672; + xfer += iprot->readI32(ecast672); + this->state = (LockState::type)ecast672; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -14851,13 +15653,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other631) { - lockid = other631.lockid; - state = other631.state; +LockResponse::LockResponse(const LockResponse& other673) { + lockid = other673.lockid; + state = other673.state; } -LockResponse& LockResponse::operator=(const LockResponse& other632) { - lockid = other632.lockid; - state = other632.state; +LockResponse& LockResponse::operator=(const LockResponse& other674) { + lockid = other674.lockid; + state = other674.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -14979,17 +15781,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } 
-CheckLockRequest::CheckLockRequest(const CheckLockRequest& other633) { - lockid = other633.lockid; - txnid = other633.txnid; - elapsed_ms = other633.elapsed_ms; - __isset = other633.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other675) { + lockid = other675.lockid; + txnid = other675.txnid; + elapsed_ms = other675.elapsed_ms; + __isset = other675.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other634) { - lockid = other634.lockid; - txnid = other634.txnid; - elapsed_ms = other634.elapsed_ms; - __isset = other634.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other676) { + lockid = other676.lockid; + txnid = other676.txnid; + elapsed_ms = other676.elapsed_ms; + __isset = other676.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -15073,11 +15875,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other635) { - lockid = other635.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other677) { + lockid = other677.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other636) { - lockid = other636.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other678) { + lockid = other678.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -15216,19 +16018,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other637) { - dbname = other637.dbname; - tablename = other637.tablename; - partname = other637.partname; - isExtended = other637.isExtended; - __isset = other637.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other679) { + dbname = other679.dbname; + tablename = other679.tablename; + partname = other679.partname; + isExtended = other679.isExtended; + __isset = 
other679.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other638) { - dbname = other638.dbname; - tablename = other638.tablename; - partname = other638.partname; - isExtended = other638.isExtended; - __isset = other638.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other680) { + dbname = other680.dbname; + tablename = other680.tablename; + partname = other680.partname; + isExtended = other680.isExtended; + __isset = other680.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -15381,9 +16183,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast639; - xfer += iprot->readI32(ecast639); - this->state = (LockState::type)ecast639; + int32_t ecast681; + xfer += iprot->readI32(ecast681); + this->state = (LockState::type)ecast681; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -15391,9 +16193,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast640; - xfer += iprot->readI32(ecast640); - this->type = (LockType::type)ecast640; + int32_t ecast682; + xfer += iprot->readI32(ecast682); + this->type = (LockType::type)ecast682; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -15609,43 +16411,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other641) { - lockid = other641.lockid; - dbname = other641.dbname; - tablename = other641.tablename; - partname = other641.partname; - state = other641.state; - type = other641.type; - txnid = other641.txnid; - lastheartbeat = other641.lastheartbeat; - acquiredat = other641.acquiredat; - user = other641.user; - hostname = other641.hostname; - heartbeatCount = 
other641.heartbeatCount; - agentInfo = other641.agentInfo; - blockedByExtId = other641.blockedByExtId; - blockedByIntId = other641.blockedByIntId; - lockIdInternal = other641.lockIdInternal; - __isset = other641.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other642) { - lockid = other642.lockid; - dbname = other642.dbname; - tablename = other642.tablename; - partname = other642.partname; - state = other642.state; - type = other642.type; - txnid = other642.txnid; - lastheartbeat = other642.lastheartbeat; - acquiredat = other642.acquiredat; - user = other642.user; - hostname = other642.hostname; - heartbeatCount = other642.heartbeatCount; - agentInfo = other642.agentInfo; - blockedByExtId = other642.blockedByExtId; - blockedByIntId = other642.blockedByIntId; - lockIdInternal = other642.lockIdInternal; - __isset = other642.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other683) { + lockid = other683.lockid; + dbname = other683.dbname; + tablename = other683.tablename; + partname = other683.partname; + state = other683.state; + type = other683.type; + txnid = other683.txnid; + lastheartbeat = other683.lastheartbeat; + acquiredat = other683.acquiredat; + user = other683.user; + hostname = other683.hostname; + heartbeatCount = other683.heartbeatCount; + agentInfo = other683.agentInfo; + blockedByExtId = other683.blockedByExtId; + blockedByIntId = other683.blockedByIntId; + lockIdInternal = other683.lockIdInternal; + __isset = other683.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other684) { + lockid = other684.lockid; + dbname = other684.dbname; + tablename = other684.tablename; + partname = other684.partname; + state = other684.state; + type = other684.type; + txnid = other684.txnid; + lastheartbeat = other684.lastheartbeat; + acquiredat = other684.acquiredat; + user = other684.user; + hostname = 
other684.hostname; + heartbeatCount = other684.heartbeatCount; + agentInfo = other684.agentInfo; + blockedByExtId = other684.blockedByExtId; + blockedByIntId = other684.blockedByIntId; + lockIdInternal = other684.lockIdInternal; + __isset = other684.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -15704,14 +16506,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size643; - ::apache::thrift::protocol::TType _etype646; - xfer += iprot->readListBegin(_etype646, _size643); - this->locks.resize(_size643); - uint32_t _i647; - for (_i647 = 0; _i647 < _size643; ++_i647) + uint32_t _size685; + ::apache::thrift::protocol::TType _etype688; + xfer += iprot->readListBegin(_etype688, _size685); + this->locks.resize(_size685); + uint32_t _i689; + for (_i689 = 0; _i689 < _size685; ++_i689) { - xfer += this->locks[_i647].read(iprot); + xfer += this->locks[_i689].read(iprot); } xfer += iprot->readListEnd(); } @@ -15740,10 +16542,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter648; - for (_iter648 = this->locks.begin(); _iter648 != this->locks.end(); ++_iter648) + std::vector ::const_iterator _iter690; + for (_iter690 = this->locks.begin(); _iter690 != this->locks.end(); ++_iter690) { - xfer += (*_iter648).write(oprot); + xfer += (*_iter690).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15760,13 +16562,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other649) { - locks = other649.locks; - __isset = other649.__isset; 
+ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other691) { + locks = other691.locks; + __isset = other691.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other650) { - locks = other650.locks; - __isset = other650.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other692) { + locks = other692.locks; + __isset = other692.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -15867,15 +16669,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other651) { - lockid = other651.lockid; - txnid = other651.txnid; - __isset = other651.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other693) { + lockid = other693.lockid; + txnid = other693.txnid; + __isset = other693.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other652) { - lockid = other652.lockid; - txnid = other652.txnid; - __isset = other652.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other694) { + lockid = other694.lockid; + txnid = other694.txnid; + __isset = other694.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -15978,13 +16780,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other653) { - min = other653.min; - max = other653.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other695) { + min = other695.min; + max = other695.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other654) { - min = other654.min; - max = other654.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other696) { + min = 
other696.min; + max = other696.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -16035,15 +16837,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size655; - ::apache::thrift::protocol::TType _etype658; - xfer += iprot->readSetBegin(_etype658, _size655); - uint32_t _i659; - for (_i659 = 0; _i659 < _size655; ++_i659) + uint32_t _size697; + ::apache::thrift::protocol::TType _etype700; + xfer += iprot->readSetBegin(_etype700, _size697); + uint32_t _i701; + for (_i701 = 0; _i701 < _size697; ++_i701) { - int64_t _elem660; - xfer += iprot->readI64(_elem660); - this->aborted.insert(_elem660); + int64_t _elem702; + xfer += iprot->readI64(_elem702); + this->aborted.insert(_elem702); } xfer += iprot->readSetEnd(); } @@ -16056,15 +16858,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size661; - ::apache::thrift::protocol::TType _etype664; - xfer += iprot->readSetBegin(_etype664, _size661); - uint32_t _i665; - for (_i665 = 0; _i665 < _size661; ++_i665) + uint32_t _size703; + ::apache::thrift::protocol::TType _etype706; + xfer += iprot->readSetBegin(_etype706, _size703); + uint32_t _i707; + for (_i707 = 0; _i707 < _size703; ++_i707) { - int64_t _elem666; - xfer += iprot->readI64(_elem666); - this->nosuch.insert(_elem666); + int64_t _elem708; + xfer += iprot->readI64(_elem708); + this->nosuch.insert(_elem708); } xfer += iprot->readSetEnd(); } @@ -16097,10 +16899,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter667; - for (_iter667 = 
this->aborted.begin(); _iter667 != this->aborted.end(); ++_iter667) + std::set ::const_iterator _iter709; + for (_iter709 = this->aborted.begin(); _iter709 != this->aborted.end(); ++_iter709) { - xfer += oprot->writeI64((*_iter667)); + xfer += oprot->writeI64((*_iter709)); } xfer += oprot->writeSetEnd(); } @@ -16109,10 +16911,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter668; - for (_iter668 = this->nosuch.begin(); _iter668 != this->nosuch.end(); ++_iter668) + std::set ::const_iterator _iter710; + for (_iter710 = this->nosuch.begin(); _iter710 != this->nosuch.end(); ++_iter710) { - xfer += oprot->writeI64((*_iter668)); + xfer += oprot->writeI64((*_iter710)); } xfer += oprot->writeSetEnd(); } @@ -16129,13 +16931,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other669) { - aborted = other669.aborted; - nosuch = other669.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other711) { + aborted = other711.aborted; + nosuch = other711.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other670) { - aborted = other670.aborted; - nosuch = other670.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other712) { + aborted = other712.aborted; + nosuch = other712.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -16228,9 +17030,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t 
ecast671; - xfer += iprot->readI32(ecast671); - this->type = (CompactionType::type)ecast671; + int32_t ecast713; + xfer += iprot->readI32(ecast713); + this->type = (CompactionType::type)ecast713; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16248,17 +17050,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size672; - ::apache::thrift::protocol::TType _ktype673; - ::apache::thrift::protocol::TType _vtype674; - xfer += iprot->readMapBegin(_ktype673, _vtype674, _size672); - uint32_t _i676; - for (_i676 = 0; _i676 < _size672; ++_i676) + uint32_t _size714; + ::apache::thrift::protocol::TType _ktype715; + ::apache::thrift::protocol::TType _vtype716; + xfer += iprot->readMapBegin(_ktype715, _vtype716, _size714); + uint32_t _i718; + for (_i718 = 0; _i718 < _size714; ++_i718) { - std::string _key677; - xfer += iprot->readString(_key677); - std::string& _val678 = this->properties[_key677]; - xfer += iprot->readString(_val678); + std::string _key719; + xfer += iprot->readString(_key719); + std::string& _val720 = this->properties[_key719]; + xfer += iprot->readString(_val720); } xfer += iprot->readMapEnd(); } @@ -16316,11 +17118,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter679; - for (_iter679 = this->properties.begin(); _iter679 != this->properties.end(); ++_iter679) + std::map ::const_iterator _iter721; + for (_iter721 = this->properties.begin(); _iter721 != this->properties.end(); ++_iter721) { - xfer += oprot->writeString(_iter679->first); - xfer += oprot->writeString(_iter679->second); + xfer += oprot->writeString(_iter721->first); + 
xfer += oprot->writeString(_iter721->second); } xfer += oprot->writeMapEnd(); } @@ -16342,23 +17144,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other680) { - dbname = other680.dbname; - tablename = other680.tablename; - partitionname = other680.partitionname; - type = other680.type; - runas = other680.runas; - properties = other680.properties; - __isset = other680.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other681) { - dbname = other681.dbname; - tablename = other681.tablename; - partitionname = other681.partitionname; - type = other681.type; - runas = other681.runas; - properties = other681.properties; - __isset = other681.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other722) { + dbname = other722.dbname; + tablename = other722.tablename; + partitionname = other722.partitionname; + type = other722.type; + runas = other722.runas; + properties = other722.properties; + __isset = other722.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other723) { + dbname = other723.dbname; + tablename = other723.tablename; + partitionname = other723.partitionname; + type = other723.type; + runas = other723.runas; + properties = other723.properties; + __isset = other723.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -16485,15 +17287,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other682) { - id = other682.id; - state = other682.state; - accepted = other682.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other724) { + id = other724.id; + state = other724.state; + accepted = other724.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other683) { - id 
= other683.id; - state = other683.state; - accepted = other683.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other725) { + id = other725.id; + state = other725.state; + accepted = other725.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -16554,11 +17356,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other684) { - (void) other684; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other726) { + (void) other726; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other685) { - (void) other685; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other727) { + (void) other727; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -16684,9 +17486,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast686; - xfer += iprot->readI32(ecast686); - this->type = (CompactionType::type)ecast686; + int32_t ecast728; + xfer += iprot->readI32(ecast728); + this->type = (CompactionType::type)ecast728; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16873,37 +17675,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other687) { - dbname = other687.dbname; - tablename = other687.tablename; - partitionname = other687.partitionname; - type = other687.type; - state = other687.state; - workerid = other687.workerid; - start = other687.start; - runAs = other687.runAs; - hightestTxnId = other687.hightestTxnId; - metaInfo = other687.metaInfo; - endTime = other687.endTime; - hadoopJobId = other687.hadoopJobId; - id = other687.id; - __isset = other687.__isset; -} 
-ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other688) { - dbname = other688.dbname; - tablename = other688.tablename; - partitionname = other688.partitionname; - type = other688.type; - state = other688.state; - workerid = other688.workerid; - start = other688.start; - runAs = other688.runAs; - hightestTxnId = other688.hightestTxnId; - metaInfo = other688.metaInfo; - endTime = other688.endTime; - hadoopJobId = other688.hadoopJobId; - id = other688.id; - __isset = other688.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other729) { + dbname = other729.dbname; + tablename = other729.tablename; + partitionname = other729.partitionname; + type = other729.type; + state = other729.state; + workerid = other729.workerid; + start = other729.start; + runAs = other729.runAs; + hightestTxnId = other729.hightestTxnId; + metaInfo = other729.metaInfo; + endTime = other729.endTime; + hadoopJobId = other729.hadoopJobId; + id = other729.id; + __isset = other729.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other730) { + dbname = other730.dbname; + tablename = other730.tablename; + partitionname = other730.partitionname; + type = other730.type; + state = other730.state; + workerid = other730.workerid; + start = other730.start; + runAs = other730.runAs; + hightestTxnId = other730.hightestTxnId; + metaInfo = other730.metaInfo; + endTime = other730.endTime; + hadoopJobId = other730.hadoopJobId; + id = other730.id; + __isset = other730.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -16960,14 +17762,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size689; - ::apache::thrift::protocol::TType _etype692; - xfer += iprot->readListBegin(_etype692, 
_size689); - this->compacts.resize(_size689); - uint32_t _i693; - for (_i693 = 0; _i693 < _size689; ++_i693) + uint32_t _size731; + ::apache::thrift::protocol::TType _etype734; + xfer += iprot->readListBegin(_etype734, _size731); + this->compacts.resize(_size731); + uint32_t _i735; + for (_i735 = 0; _i735 < _size731; ++_i735) { - xfer += this->compacts[_i693].read(iprot); + xfer += this->compacts[_i735].read(iprot); } xfer += iprot->readListEnd(); } @@ -16998,10 +17800,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter694; - for (_iter694 = this->compacts.begin(); _iter694 != this->compacts.end(); ++_iter694) + std::vector ::const_iterator _iter736; + for (_iter736 = this->compacts.begin(); _iter736 != this->compacts.end(); ++_iter736) { - xfer += (*_iter694).write(oprot); + xfer += (*_iter736).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17017,11 +17819,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other695) { - compacts = other695.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other737) { + compacts = other737.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other696) { - compacts = other696.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other738) { + compacts = other738.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -17110,14 +17912,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size697; 
- ::apache::thrift::protocol::TType _etype700; - xfer += iprot->readListBegin(_etype700, _size697); - this->partitionnames.resize(_size697); - uint32_t _i701; - for (_i701 = 0; _i701 < _size697; ++_i701) + uint32_t _size739; + ::apache::thrift::protocol::TType _etype742; + xfer += iprot->readListBegin(_etype742, _size739); + this->partitionnames.resize(_size739); + uint32_t _i743; + for (_i743 = 0; _i743 < _size739; ++_i743) { - xfer += iprot->readString(this->partitionnames[_i701]); + xfer += iprot->readString(this->partitionnames[_i743]); } xfer += iprot->readListEnd(); } @@ -17128,9 +17930,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast702; - xfer += iprot->readI32(ecast702); - this->operationType = (DataOperationType::type)ecast702; + int32_t ecast744; + xfer += iprot->readI32(ecast744); + this->operationType = (DataOperationType::type)ecast744; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -17176,10 +17978,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter703; - for (_iter703 = this->partitionnames.begin(); _iter703 != this->partitionnames.end(); ++_iter703) + std::vector ::const_iterator _iter745; + for (_iter745 = this->partitionnames.begin(); _iter745 != this->partitionnames.end(); ++_iter745) { - xfer += oprot->writeString((*_iter703)); + xfer += oprot->writeString((*_iter745)); } xfer += oprot->writeListEnd(); } @@ -17205,21 +18007,21 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other704) { - txnid = other704.txnid; - 
dbname = other704.dbname; - tablename = other704.tablename; - partitionnames = other704.partitionnames; - operationType = other704.operationType; - __isset = other704.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other705) { - txnid = other705.txnid; - dbname = other705.dbname; - tablename = other705.tablename; - partitionnames = other705.partitionnames; - operationType = other705.operationType; - __isset = other705.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other746) { + txnid = other746.txnid; + dbname = other746.dbname; + tablename = other746.tablename; + partitionnames = other746.partitionnames; + operationType = other746.operationType; + __isset = other746.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other747) { + txnid = other747.txnid; + dbname = other747.dbname; + tablename = other747.tablename; + partitionnames = other747.partitionnames; + operationType = other747.operationType; + __isset = other747.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -17401,23 +18203,23 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.__isset, b.__isset); } -BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other706) { - isnull = other706.isnull; - time = other706.time; - txnid = other706.txnid; - dbname = other706.dbname; - tablename = other706.tablename; - partitionname = other706.partitionname; - __isset = other706.__isset; -} -BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other707) { - isnull = other707.isnull; - time = other707.time; - txnid = other707.txnid; - dbname = other707.dbname; - tablename = other707.tablename; - partitionname = other707.partitionname; - __isset = other707.__isset; +BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other748) { + isnull = other748.isnull; + time = other748.time; + txnid = other748.txnid; + dbname = other748.dbname; + tablename = 
other748.tablename; + partitionname = other748.partitionname; + __isset = other748.__isset; +} +BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other749) { + isnull = other749.isnull; + time = other749.time; + txnid = other749.txnid; + dbname = other749.dbname; + tablename = other749.tablename; + partitionname = other749.partitionname; + __isset = other749.__isset; return *this; } void BasicTxnInfo::printTo(std::ostream& out) const { @@ -17498,15 +18300,15 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size708; - ::apache::thrift::protocol::TType _etype711; - xfer += iprot->readSetBegin(_etype711, _size708); - uint32_t _i712; - for (_i712 = 0; _i712 < _size708; ++_i712) + uint32_t _size750; + ::apache::thrift::protocol::TType _etype753; + xfer += iprot->readSetBegin(_etype753, _size750); + uint32_t _i754; + for (_i754 = 0; _i754 < _size750; ++_i754) { - std::string _elem713; - xfer += iprot->readString(_elem713); - this->tablesUsed.insert(_elem713); + std::string _elem755; + xfer += iprot->readString(_elem755); + this->tablesUsed.insert(_elem755); } xfer += iprot->readSetEnd(); } @@ -17557,10 +18359,10 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 3); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter714; - for (_iter714 = this->tablesUsed.begin(); _iter714 != this->tablesUsed.end(); ++_iter714) + std::set ::const_iterator _iter756; + for (_iter756 = this->tablesUsed.begin(); _iter756 != this->tablesUsed.end(); ++_iter756) { - xfer += oprot->writeString((*_iter714)); + xfer += oprot->writeString((*_iter756)); } xfer += oprot->writeSetEnd(); } @@ -17585,19 +18387,19 @@ void swap(CreationMetadata &a, CreationMetadata &b) { 
swap(a.__isset, b.__isset); } -CreationMetadata::CreationMetadata(const CreationMetadata& other715) { - dbName = other715.dbName; - tblName = other715.tblName; - tablesUsed = other715.tablesUsed; - validTxnList = other715.validTxnList; - __isset = other715.__isset; +CreationMetadata::CreationMetadata(const CreationMetadata& other757) { + dbName = other757.dbName; + tblName = other757.tblName; + tablesUsed = other757.tablesUsed; + validTxnList = other757.validTxnList; + __isset = other757.__isset; } -CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other716) { - dbName = other716.dbName; - tblName = other716.tblName; - tablesUsed = other716.tablesUsed; - validTxnList = other716.validTxnList; - __isset = other716.__isset; +CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other758) { + dbName = other758.dbName; + tblName = other758.tblName; + tablesUsed = other758.tablesUsed; + validTxnList = other758.validTxnList; + __isset = other758.__isset; return *this; } void CreationMetadata::printTo(std::ostream& out) const { @@ -17702,15 +18504,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other717) { - lastEvent = other717.lastEvent; - maxEvents = other717.maxEvents; - __isset = other717.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other759) { + lastEvent = other759.lastEvent; + maxEvents = other759.maxEvents; + __isset = other759.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other718) { - lastEvent = other718.lastEvent; - maxEvents = other718.maxEvents; - __isset = other718.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other760) { + lastEvent = other760.lastEvent; + maxEvents = other760.maxEvents; + __isset = other760.__isset; 
return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -17911,25 +18713,25 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other719) { - eventId = other719.eventId; - eventTime = other719.eventTime; - eventType = other719.eventType; - dbName = other719.dbName; - tableName = other719.tableName; - message = other719.message; - messageFormat = other719.messageFormat; - __isset = other719.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other720) { - eventId = other720.eventId; - eventTime = other720.eventTime; - eventType = other720.eventType; - dbName = other720.dbName; - tableName = other720.tableName; - message = other720.message; - messageFormat = other720.messageFormat; - __isset = other720.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other761) { + eventId = other761.eventId; + eventTime = other761.eventTime; + eventType = other761.eventType; + dbName = other761.dbName; + tableName = other761.tableName; + message = other761.message; + messageFormat = other761.messageFormat; + __isset = other761.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other762) { + eventId = other762.eventId; + eventTime = other762.eventTime; + eventType = other762.eventType; + dbName = other762.dbName; + tableName = other762.tableName; + message = other762.message; + messageFormat = other762.messageFormat; + __isset = other762.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -17980,14 +18782,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size721; - ::apache::thrift::protocol::TType _etype724; - xfer += iprot->readListBegin(_etype724, _size721); - this->events.resize(_size721); - uint32_t _i725; - 
for (_i725 = 0; _i725 < _size721; ++_i725) + uint32_t _size763; + ::apache::thrift::protocol::TType _etype766; + xfer += iprot->readListBegin(_etype766, _size763); + this->events.resize(_size763); + uint32_t _i767; + for (_i767 = 0; _i767 < _size763; ++_i767) { - xfer += this->events[_i725].read(iprot); + xfer += this->events[_i767].read(iprot); } xfer += iprot->readListEnd(); } @@ -18018,10 +18820,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter726; - for (_iter726 = this->events.begin(); _iter726 != this->events.end(); ++_iter726) + std::vector ::const_iterator _iter768; + for (_iter768 = this->events.begin(); _iter768 != this->events.end(); ++_iter768) { - xfer += (*_iter726).write(oprot); + xfer += (*_iter768).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18037,11 +18839,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other727) { - events = other727.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other769) { + events = other769.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other728) { - events = other728.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other770) { + events = other770.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -18123,11 +18925,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& 
other729) { - eventId = other729.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other771) { + eventId = other771.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other730) { - eventId = other730.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other772) { + eventId = other772.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -18229,13 +19031,13 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) swap(a.dbName, b.dbName); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other731) { - fromEventId = other731.fromEventId; - dbName = other731.dbName; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other773) { + fromEventId = other773.fromEventId; + dbName = other773.dbName; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other732) { - fromEventId = other732.fromEventId; - dbName = other732.dbName; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other774) { + fromEventId = other774.fromEventId; + dbName = other774.dbName; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -18318,11 +19120,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } -NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other733) { - eventsCount = other733.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other775) { + eventsCount = other775.eventsCount; } 
-NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other734) { - eventsCount = other734.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other776) { + eventsCount = other776.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -18385,14 +19187,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size735; - ::apache::thrift::protocol::TType _etype738; - xfer += iprot->readListBegin(_etype738, _size735); - this->filesAdded.resize(_size735); - uint32_t _i739; - for (_i739 = 0; _i739 < _size735; ++_i739) + uint32_t _size777; + ::apache::thrift::protocol::TType _etype780; + xfer += iprot->readListBegin(_etype780, _size777); + this->filesAdded.resize(_size777); + uint32_t _i781; + for (_i781 = 0; _i781 < _size777; ++_i781) { - xfer += iprot->readString(this->filesAdded[_i739]); + xfer += iprot->readString(this->filesAdded[_i781]); } xfer += iprot->readListEnd(); } @@ -18405,14 +19207,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size740; - ::apache::thrift::protocol::TType _etype743; - xfer += iprot->readListBegin(_etype743, _size740); - this->filesAddedChecksum.resize(_size740); - uint32_t _i744; - for (_i744 = 0; _i744 < _size740; ++_i744) + uint32_t _size782; + ::apache::thrift::protocol::TType _etype785; + xfer += iprot->readListBegin(_etype785, _size782); + this->filesAddedChecksum.resize(_size782); + uint32_t _i786; + for (_i786 = 0; _i786 < _size782; ++_i786) { - xfer += iprot->readString(this->filesAddedChecksum[_i744]); + xfer += iprot->readString(this->filesAddedChecksum[_i786]); } xfer += 
iprot->readListEnd(); } @@ -18448,10 +19250,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter745; - for (_iter745 = this->filesAdded.begin(); _iter745 != this->filesAdded.end(); ++_iter745) + std::vector ::const_iterator _iter787; + for (_iter787 = this->filesAdded.begin(); _iter787 != this->filesAdded.end(); ++_iter787) { - xfer += oprot->writeString((*_iter745)); + xfer += oprot->writeString((*_iter787)); } xfer += oprot->writeListEnd(); } @@ -18461,10 +19263,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector ::const_iterator _iter746; - for (_iter746 = this->filesAddedChecksum.begin(); _iter746 != this->filesAddedChecksum.end(); ++_iter746) + std::vector ::const_iterator _iter788; + for (_iter788 = this->filesAddedChecksum.begin(); _iter788 != this->filesAddedChecksum.end(); ++_iter788) { - xfer += oprot->writeString((*_iter746)); + xfer += oprot->writeString((*_iter788)); } xfer += oprot->writeListEnd(); } @@ -18483,17 +19285,17 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other747) { - replace = other747.replace; - filesAdded = other747.filesAdded; - filesAddedChecksum = other747.filesAddedChecksum; - __isset = other747.__isset; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other789) { + replace = other789.replace; + filesAdded = other789.filesAdded; + filesAddedChecksum = 
other789.filesAddedChecksum; + __isset = other789.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other748) { - replace = other748.replace; - filesAdded = other748.filesAdded; - filesAddedChecksum = other748.filesAddedChecksum; - __isset = other748.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other790) { + replace = other790.replace; + filesAdded = other790.filesAdded; + filesAddedChecksum = other790.filesAddedChecksum; + __isset = other790.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -18575,13 +19377,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other749) { - insertData = other749.insertData; - __isset = other749.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other791) { + insertData = other791.insertData; + __isset = other791.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other750) { - insertData = other750.insertData; - __isset = other750.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other792) { + insertData = other792.insertData; + __isset = other792.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -18678,14 +19480,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size751; - ::apache::thrift::protocol::TType _etype754; - xfer += iprot->readListBegin(_etype754, _size751); - this->partitionVals.resize(_size751); - uint32_t _i755; - for (_i755 = 0; _i755 < _size751; ++_i755) + uint32_t _size793; + ::apache::thrift::protocol::TType _etype796; + xfer += iprot->readListBegin(_etype796, _size793); + 
this->partitionVals.resize(_size793); + uint32_t _i797; + for (_i797 = 0; _i797 < _size793; ++_i797) { - xfer += iprot->readString(this->partitionVals[_i755]); + xfer += iprot->readString(this->partitionVals[_i797]); } xfer += iprot->readListEnd(); } @@ -18737,10 +19539,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter756; - for (_iter756 = this->partitionVals.begin(); _iter756 != this->partitionVals.end(); ++_iter756) + std::vector ::const_iterator _iter798; + for (_iter798 = this->partitionVals.begin(); _iter798 != this->partitionVals.end(); ++_iter798) { - xfer += oprot->writeString((*_iter756)); + xfer += oprot->writeString((*_iter798)); } xfer += oprot->writeListEnd(); } @@ -18761,21 +19563,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other757) { - successful = other757.successful; - data = other757.data; - dbName = other757.dbName; - tableName = other757.tableName; - partitionVals = other757.partitionVals; - __isset = other757.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other758) { - successful = other758.successful; - data = other758.data; - dbName = other758.dbName; - tableName = other758.tableName; - partitionVals = other758.partitionVals; - __isset = other758.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other799) { + successful = other799.successful; + data = other799.data; + dbName = other799.dbName; + tableName = other799.tableName; + partitionVals = other799.partitionVals; + __isset = other799.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other800) { + successful = 
other800.successful; + data = other800.data; + dbName = other800.dbName; + tableName = other800.tableName; + partitionVals = other800.partitionVals; + __isset = other800.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -18838,11 +19640,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other759) { - (void) other759; +FireEventResponse::FireEventResponse(const FireEventResponse& other801) { + (void) other801; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other760) { - (void) other760; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other802) { + (void) other802; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -18942,15 +19744,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other761) { - metadata = other761.metadata; - includeBitset = other761.includeBitset; - __isset = other761.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other803) { + metadata = other803.metadata; + includeBitset = other803.includeBitset; + __isset = other803.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other762) { - metadata = other762.metadata; - includeBitset = other762.includeBitset; - __isset = other762.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other804) { + metadata = other804.metadata; + includeBitset = other804.includeBitset; + __isset = other804.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -19001,17 +19803,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size763; - ::apache::thrift::protocol::TType _ktype764; 
- ::apache::thrift::protocol::TType _vtype765; - xfer += iprot->readMapBegin(_ktype764, _vtype765, _size763); - uint32_t _i767; - for (_i767 = 0; _i767 < _size763; ++_i767) + uint32_t _size805; + ::apache::thrift::protocol::TType _ktype806; + ::apache::thrift::protocol::TType _vtype807; + xfer += iprot->readMapBegin(_ktype806, _vtype807, _size805); + uint32_t _i809; + for (_i809 = 0; _i809 < _size805; ++_i809) { - int64_t _key768; - xfer += iprot->readI64(_key768); - MetadataPpdResult& _val769 = this->metadata[_key768]; - xfer += _val769.read(iprot); + int64_t _key810; + xfer += iprot->readI64(_key810); + MetadataPpdResult& _val811 = this->metadata[_key810]; + xfer += _val811.read(iprot); } xfer += iprot->readMapEnd(); } @@ -19052,11 +19854,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter770; - for (_iter770 = this->metadata.begin(); _iter770 != this->metadata.end(); ++_iter770) + std::map ::const_iterator _iter812; + for (_iter812 = this->metadata.begin(); _iter812 != this->metadata.end(); ++_iter812) { - xfer += oprot->writeI64(_iter770->first); - xfer += _iter770->second.write(oprot); + xfer += oprot->writeI64(_iter812->first); + xfer += _iter812->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -19077,13 +19879,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other771) { - metadata = other771.metadata; - isSupported = other771.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other813) { + metadata = other813.metadata; + isSupported = 
other813.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other772) { - metadata = other772.metadata; - isSupported = other772.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other814) { + metadata = other814.metadata; + isSupported = other814.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -19144,14 +19946,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size773; - ::apache::thrift::protocol::TType _etype776; - xfer += iprot->readListBegin(_etype776, _size773); - this->fileIds.resize(_size773); - uint32_t _i777; - for (_i777 = 0; _i777 < _size773; ++_i777) + uint32_t _size815; + ::apache::thrift::protocol::TType _etype818; + xfer += iprot->readListBegin(_etype818, _size815); + this->fileIds.resize(_size815); + uint32_t _i819; + for (_i819 = 0; _i819 < _size815; ++_i819) { - xfer += iprot->readI64(this->fileIds[_i777]); + xfer += iprot->readI64(this->fileIds[_i819]); } xfer += iprot->readListEnd(); } @@ -19178,9 +19980,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast778; - xfer += iprot->readI32(ecast778); - this->type = (FileMetadataExprType::type)ecast778; + int32_t ecast820; + xfer += iprot->readI32(ecast820); + this->type = (FileMetadataExprType::type)ecast820; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -19210,10 +20012,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector 
::const_iterator _iter779; - for (_iter779 = this->fileIds.begin(); _iter779 != this->fileIds.end(); ++_iter779) + std::vector ::const_iterator _iter821; + for (_iter821 = this->fileIds.begin(); _iter821 != this->fileIds.end(); ++_iter821) { - xfer += oprot->writeI64((*_iter779)); + xfer += oprot->writeI64((*_iter821)); } xfer += oprot->writeListEnd(); } @@ -19247,19 +20049,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other780) { - fileIds = other780.fileIds; - expr = other780.expr; - doGetFooters = other780.doGetFooters; - type = other780.type; - __isset = other780.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other822) { + fileIds = other822.fileIds; + expr = other822.expr; + doGetFooters = other822.doGetFooters; + type = other822.type; + __isset = other822.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other781) { - fileIds = other781.fileIds; - expr = other781.expr; - doGetFooters = other781.doGetFooters; - type = other781.type; - __isset = other781.__isset; +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other823) { + fileIds = other823.fileIds; + expr = other823.expr; + doGetFooters = other823.doGetFooters; + type = other823.type; + __isset = other823.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -19312,17 +20114,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size782; - ::apache::thrift::protocol::TType _ktype783; - ::apache::thrift::protocol::TType _vtype784; - xfer += iprot->readMapBegin(_ktype783, _vtype784, _size782); - uint32_t 
_i786; - for (_i786 = 0; _i786 < _size782; ++_i786) + uint32_t _size824; + ::apache::thrift::protocol::TType _ktype825; + ::apache::thrift::protocol::TType _vtype826; + xfer += iprot->readMapBegin(_ktype825, _vtype826, _size824); + uint32_t _i828; + for (_i828 = 0; _i828 < _size824; ++_i828) { - int64_t _key787; - xfer += iprot->readI64(_key787); - std::string& _val788 = this->metadata[_key787]; - xfer += iprot->readBinary(_val788); + int64_t _key829; + xfer += iprot->readI64(_key829); + std::string& _val830 = this->metadata[_key829]; + xfer += iprot->readBinary(_val830); } xfer += iprot->readMapEnd(); } @@ -19363,11 +20165,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter789; - for (_iter789 = this->metadata.begin(); _iter789 != this->metadata.end(); ++_iter789) + std::map ::const_iterator _iter831; + for (_iter831 = this->metadata.begin(); _iter831 != this->metadata.end(); ++_iter831) { - xfer += oprot->writeI64(_iter789->first); - xfer += oprot->writeBinary(_iter789->second); + xfer += oprot->writeI64(_iter831->first); + xfer += oprot->writeBinary(_iter831->second); } xfer += oprot->writeMapEnd(); } @@ -19388,13 +20190,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other790) { - metadata = other790.metadata; - isSupported = other790.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other832) { + metadata = other832.metadata; + isSupported = other832.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other791) { - metadata = other791.metadata; - 
isSupported = other791.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other833) { + metadata = other833.metadata; + isSupported = other833.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -19440,14 +20242,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size792; - ::apache::thrift::protocol::TType _etype795; - xfer += iprot->readListBegin(_etype795, _size792); - this->fileIds.resize(_size792); - uint32_t _i796; - for (_i796 = 0; _i796 < _size792; ++_i796) + uint32_t _size834; + ::apache::thrift::protocol::TType _etype837; + xfer += iprot->readListBegin(_etype837, _size834); + this->fileIds.resize(_size834); + uint32_t _i838; + for (_i838 = 0; _i838 < _size834; ++_i838) { - xfer += iprot->readI64(this->fileIds[_i796]); + xfer += iprot->readI64(this->fileIds[_i838]); } xfer += iprot->readListEnd(); } @@ -19478,10 +20280,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter797; - for (_iter797 = this->fileIds.begin(); _iter797 != this->fileIds.end(); ++_iter797) + std::vector ::const_iterator _iter839; + for (_iter839 = this->fileIds.begin(); _iter839 != this->fileIds.end(); ++_iter839) { - xfer += oprot->writeI64((*_iter797)); + xfer += oprot->writeI64((*_iter839)); } xfer += oprot->writeListEnd(); } @@ -19497,11 +20299,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other798) { - fileIds = other798.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const 
GetFileMetadataRequest& other840) { + fileIds = other840.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other799) { - fileIds = other799.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other841) { + fileIds = other841.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -19560,11 +20362,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other800) { - (void) other800; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other842) { + (void) other842; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other801) { - (void) other801; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other843) { + (void) other843; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -19618,14 +20420,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size802; - ::apache::thrift::protocol::TType _etype805; - xfer += iprot->readListBegin(_etype805, _size802); - this->fileIds.resize(_size802); - uint32_t _i806; - for (_i806 = 0; _i806 < _size802; ++_i806) + uint32_t _size844; + ::apache::thrift::protocol::TType _etype847; + xfer += iprot->readListBegin(_etype847, _size844); + this->fileIds.resize(_size844); + uint32_t _i848; + for (_i848 = 0; _i848 < _size844; ++_i848) { - xfer += iprot->readI64(this->fileIds[_i806]); + xfer += iprot->readI64(this->fileIds[_i848]); } xfer += iprot->readListEnd(); } @@ -19638,14 +20440,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size807; - 
::apache::thrift::protocol::TType _etype810; - xfer += iprot->readListBegin(_etype810, _size807); - this->metadata.resize(_size807); - uint32_t _i811; - for (_i811 = 0; _i811 < _size807; ++_i811) + uint32_t _size849; + ::apache::thrift::protocol::TType _etype852; + xfer += iprot->readListBegin(_etype852, _size849); + this->metadata.resize(_size849); + uint32_t _i853; + for (_i853 = 0; _i853 < _size849; ++_i853) { - xfer += iprot->readBinary(this->metadata[_i811]); + xfer += iprot->readBinary(this->metadata[_i853]); } xfer += iprot->readListEnd(); } @@ -19656,9 +20458,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast812; - xfer += iprot->readI32(ecast812); - this->type = (FileMetadataExprType::type)ecast812; + int32_t ecast854; + xfer += iprot->readI32(ecast854); + this->type = (FileMetadataExprType::type)ecast854; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -19688,10 +20490,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter813; - for (_iter813 = this->fileIds.begin(); _iter813 != this->fileIds.end(); ++_iter813) + std::vector ::const_iterator _iter855; + for (_iter855 = this->fileIds.begin(); _iter855 != this->fileIds.end(); ++_iter855) { - xfer += oprot->writeI64((*_iter813)); + xfer += oprot->writeI64((*_iter855)); } xfer += oprot->writeListEnd(); } @@ -19700,10 +20502,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator 
_iter814; - for (_iter814 = this->metadata.begin(); _iter814 != this->metadata.end(); ++_iter814) + std::vector ::const_iterator _iter856; + for (_iter856 = this->metadata.begin(); _iter856 != this->metadata.end(); ++_iter856) { - xfer += oprot->writeBinary((*_iter814)); + xfer += oprot->writeBinary((*_iter856)); } xfer += oprot->writeListEnd(); } @@ -19727,17 +20529,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other815) { - fileIds = other815.fileIds; - metadata = other815.metadata; - type = other815.type; - __isset = other815.__isset; +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other857) { + fileIds = other857.fileIds; + metadata = other857.metadata; + type = other857.type; + __isset = other857.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other816) { - fileIds = other816.fileIds; - metadata = other816.metadata; - type = other816.type; - __isset = other816.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other858) { + fileIds = other858.fileIds; + metadata = other858.metadata; + type = other858.type; + __isset = other858.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -19798,11 +20600,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other817) { - (void) other817; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other859) { + (void) other859; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other818) { - (void) other818; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other860) { + (void) other860; return *this; } void 
ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -19846,14 +20648,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size819; - ::apache::thrift::protocol::TType _etype822; - xfer += iprot->readListBegin(_etype822, _size819); - this->fileIds.resize(_size819); - uint32_t _i823; - for (_i823 = 0; _i823 < _size819; ++_i823) + uint32_t _size861; + ::apache::thrift::protocol::TType _etype864; + xfer += iprot->readListBegin(_etype864, _size861); + this->fileIds.resize(_size861); + uint32_t _i865; + for (_i865 = 0; _i865 < _size861; ++_i865) { - xfer += iprot->readI64(this->fileIds[_i823]); + xfer += iprot->readI64(this->fileIds[_i865]); } xfer += iprot->readListEnd(); } @@ -19884,10 +20686,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter824; - for (_iter824 = this->fileIds.begin(); _iter824 != this->fileIds.end(); ++_iter824) + std::vector ::const_iterator _iter866; + for (_iter866 = this->fileIds.begin(); _iter866 != this->fileIds.end(); ++_iter866) { - xfer += oprot->writeI64((*_iter824)); + xfer += oprot->writeI64((*_iter866)); } xfer += oprot->writeListEnd(); } @@ -19903,11 +20705,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other825) { - fileIds = other825.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other867) { + fileIds = other867.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other826) { - fileIds = other826.fileIds; 
+ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other868) { + fileIds = other868.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -19989,11 +20791,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other827) { - isSupported = other827.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other869) { + isSupported = other869.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other828) { - isSupported = other828.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other870) { + isSupported = other870.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -20134,19 +20936,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other829) { - dbName = other829.dbName; - tblName = other829.tblName; - partName = other829.partName; - isAllParts = other829.isAllParts; - __isset = other829.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other871) { + dbName = other871.dbName; + tblName = other871.tblName; + partName = other871.partName; + isAllParts = other871.isAllParts; + __isset = other871.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other830) { - dbName = other830.dbName; - tblName = other830.tblName; - partName = other830.partName; - isAllParts = other830.isAllParts; - __isset = other830.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other872) { + dbName = 
other872.dbName; + tblName = other872.tblName; + partName = other872.partName; + isAllParts = other872.isAllParts; + __isset = other872.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -20194,14 +20996,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size831; - ::apache::thrift::protocol::TType _etype834; - xfer += iprot->readListBegin(_etype834, _size831); - this->functions.resize(_size831); - uint32_t _i835; - for (_i835 = 0; _i835 < _size831; ++_i835) + uint32_t _size873; + ::apache::thrift::protocol::TType _etype876; + xfer += iprot->readListBegin(_etype876, _size873); + this->functions.resize(_size873); + uint32_t _i877; + for (_i877 = 0; _i877 < _size873; ++_i877) { - xfer += this->functions[_i835].read(iprot); + xfer += this->functions[_i877].read(iprot); } xfer += iprot->readListEnd(); } @@ -20231,10 +21033,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter836; - for (_iter836 = this->functions.begin(); _iter836 != this->functions.end(); ++_iter836) + std::vector ::const_iterator _iter878; + for (_iter878 = this->functions.begin(); _iter878 != this->functions.end(); ++_iter878) { - xfer += (*_iter836).write(oprot); + xfer += (*_iter878).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20251,13 +21053,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other837) { - functions = other837.functions; - __isset = other837.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const 
GetAllFunctionsResponse& other879) { + functions = other879.functions; + __isset = other879.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other838) { - functions = other838.functions; - __isset = other838.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other880) { + functions = other880.functions; + __isset = other880.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -20302,16 +21104,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size839; - ::apache::thrift::protocol::TType _etype842; - xfer += iprot->readListBegin(_etype842, _size839); - this->values.resize(_size839); - uint32_t _i843; - for (_i843 = 0; _i843 < _size839; ++_i843) + uint32_t _size881; + ::apache::thrift::protocol::TType _etype884; + xfer += iprot->readListBegin(_etype884, _size881); + this->values.resize(_size881); + uint32_t _i885; + for (_i885 = 0; _i885 < _size881; ++_i885) { - int32_t ecast844; - xfer += iprot->readI32(ecast844); - this->values[_i843] = (ClientCapability::type)ecast844; + int32_t ecast886; + xfer += iprot->readI32(ecast886); + this->values[_i885] = (ClientCapability::type)ecast886; } xfer += iprot->readListEnd(); } @@ -20342,10 +21144,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->values.size())); - std::vector ::const_iterator _iter845; - for (_iter845 = this->values.begin(); _iter845 != this->values.end(); ++_iter845) + std::vector ::const_iterator _iter887; + for (_iter887 = this->values.begin(); _iter887 != this->values.end(); ++_iter887) { - xfer += oprot->writeI32((int32_t)(*_iter845)); + xfer += 
oprot->writeI32((int32_t)(*_iter887)); } xfer += oprot->writeListEnd(); } @@ -20361,11 +21163,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) { swap(a.values, b.values); } -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other846) { - values = other846.values; +ClientCapabilities::ClientCapabilities(const ClientCapabilities& other888) { + values = other888.values; } -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other847) { - values = other847.values; +ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other889) { + values = other889.values; return *this; } void ClientCapabilities::printTo(std::ostream& out) const { @@ -20487,17 +21289,17 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.__isset, b.__isset); } -GetTableRequest::GetTableRequest(const GetTableRequest& other848) { - dbName = other848.dbName; - tblName = other848.tblName; - capabilities = other848.capabilities; - __isset = other848.__isset; +GetTableRequest::GetTableRequest(const GetTableRequest& other890) { + dbName = other890.dbName; + tblName = other890.tblName; + capabilities = other890.capabilities; + __isset = other890.__isset; } -GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other849) { - dbName = other849.dbName; - tblName = other849.tblName; - capabilities = other849.capabilities; - __isset = other849.__isset; +GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other891) { + dbName = other891.dbName; + tblName = other891.tblName; + capabilities = other891.capabilities; + __isset = other891.__isset; return *this; } void GetTableRequest::printTo(std::ostream& out) const { @@ -20581,11 +21383,11 @@ void swap(GetTableResult &a, GetTableResult &b) { swap(a.table, b.table); } -GetTableResult::GetTableResult(const GetTableResult& other850) { - table = other850.table; +GetTableResult::GetTableResult(const GetTableResult& other892) { + table = other892.table; } 
-GetTableResult& GetTableResult::operator=(const GetTableResult& other851) { - table = other851.table; +GetTableResult& GetTableResult::operator=(const GetTableResult& other893) { + table = other893.table; return *this; } void GetTableResult::printTo(std::ostream& out) const { @@ -20648,14 +21450,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblNames.clear(); - uint32_t _size852; - ::apache::thrift::protocol::TType _etype855; - xfer += iprot->readListBegin(_etype855, _size852); - this->tblNames.resize(_size852); - uint32_t _i856; - for (_i856 = 0; _i856 < _size852; ++_i856) + uint32_t _size894; + ::apache::thrift::protocol::TType _etype897; + xfer += iprot->readListBegin(_etype897, _size894); + this->tblNames.resize(_size894); + uint32_t _i898; + for (_i898 = 0; _i898 < _size894; ++_i898) { - xfer += iprot->readString(this->tblNames[_i856]); + xfer += iprot->readString(this->tblNames[_i898]); } xfer += iprot->readListEnd(); } @@ -20699,10 +21501,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tblNames.size())); - std::vector ::const_iterator _iter857; - for (_iter857 = this->tblNames.begin(); _iter857 != this->tblNames.end(); ++_iter857) + std::vector ::const_iterator _iter899; + for (_iter899 = this->tblNames.begin(); _iter899 != this->tblNames.end(); ++_iter899) { - xfer += oprot->writeString((*_iter857)); + xfer += oprot->writeString((*_iter899)); } xfer += oprot->writeListEnd(); } @@ -20726,17 +21528,17 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) { swap(a.__isset, b.__isset); } -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other858) { - dbName = other858.dbName; - tblNames = other858.tblNames; - capabilities = other858.capabilities; 
- __isset = other858.__isset; +GetTablesRequest::GetTablesRequest(const GetTablesRequest& other900) { + dbName = other900.dbName; + tblNames = other900.tblNames; + capabilities = other900.capabilities; + __isset = other900.__isset; } -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other859) { - dbName = other859.dbName; - tblNames = other859.tblNames; - capabilities = other859.capabilities; - __isset = other859.__isset; +GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other901) { + dbName = other901.dbName; + tblNames = other901.tblNames; + capabilities = other901.capabilities; + __isset = other901.__isset; return *this; } void GetTablesRequest::printTo(std::ostream& out) const { @@ -20783,14 +21585,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tables.clear(); - uint32_t _size860; - ::apache::thrift::protocol::TType _etype863; - xfer += iprot->readListBegin(_etype863, _size860); - this->tables.resize(_size860); - uint32_t _i864; - for (_i864 = 0; _i864 < _size860; ++_i864) + uint32_t _size902; + ::apache::thrift::protocol::TType _etype905; + xfer += iprot->readListBegin(_etype905, _size902); + this->tables.resize(_size902); + uint32_t _i906; + for (_i906 = 0; _i906 < _size902; ++_i906) { - xfer += this->tables[_i864].read(iprot); + xfer += this->tables[_i906].read(iprot); } xfer += iprot->readListEnd(); } @@ -20821,10 +21623,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tables.size())); - std::vector
::const_iterator _iter865; - for (_iter865 = this->tables.begin(); _iter865 != this->tables.end(); ++_iter865) + std::vector
::const_iterator _iter907; + for (_iter907 = this->tables.begin(); _iter907 != this->tables.end(); ++_iter907) { - xfer += (*_iter865).write(oprot); + xfer += (*_iter907).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20840,11 +21642,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) { swap(a.tables, b.tables); } -GetTablesResult::GetTablesResult(const GetTablesResult& other866) { - tables = other866.tables; +GetTablesResult::GetTablesResult(const GetTablesResult& other908) { + tables = other908.tables; } -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other867) { - tables = other867.tables; +GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other909) { + tables = other909.tables; return *this; } void GetTablesResult::printTo(std::ostream& out) const { @@ -20946,13 +21748,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) { swap(a.purge, b.purge); } -CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other868) { - dataPath = other868.dataPath; - purge = other868.purge; +CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other910) { + dataPath = other910.dataPath; + purge = other910.purge; } -CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other869) { - dataPath = other869.dataPath; - purge = other869.purge; +CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other911) { + dataPath = other911.dataPath; + purge = other911.purge; return *this; } void CmRecycleRequest::printTo(std::ostream& out) const { @@ -21012,11 +21814,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) { (void) b; } -CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other870) { - (void) other870; +CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other912) { + (void) other912; } -CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other871) { - (void) other871; +CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& 
other913) { + (void) other913; return *this; } void CmRecycleResponse::printTo(std::ostream& out) const { @@ -21157,19 +21959,19 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other872) { - dbName = other872.dbName; - tableName = other872.tableName; - tableType = other872.tableType; - comments = other872.comments; - __isset = other872.__isset; +TableMeta::TableMeta(const TableMeta& other914) { + dbName = other914.dbName; + tableName = other914.tableName; + tableType = other914.tableType; + comments = other914.comments; + __isset = other914.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other873) { - dbName = other873.dbName; - tableName = other873.tableName; - tableType = other873.tableType; - comments = other873.comments; - __isset = other873.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other915) { + dbName = other915.dbName; + tableName = other915.tableName; + tableType = other915.tableType; + comments = other915.comments; + __isset = other915.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -21235,15 +22037,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size874; - ::apache::thrift::protocol::TType _etype877; - xfer += iprot->readSetBegin(_etype877, _size874); - uint32_t _i878; - for (_i878 = 0; _i878 < _size874; ++_i878) + uint32_t _size916; + ::apache::thrift::protocol::TType _etype919; + xfer += iprot->readSetBegin(_etype919, _size916); + uint32_t _i920; + for (_i920 = 0; _i920 < _size916; ++_i920) { - std::string _elem879; - xfer += iprot->readString(_elem879); - this->tablesUsed.insert(_elem879); + std::string _elem921; + xfer += iprot->readString(_elem921); + this->tablesUsed.insert(_elem921); } xfer += iprot->readSetEnd(); } @@ -21290,10 +22092,10 @@ uint32_t 
Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter880; - for (_iter880 = this->tablesUsed.begin(); _iter880 != this->tablesUsed.end(); ++_iter880) + std::set ::const_iterator _iter922; + for (_iter922 = this->tablesUsed.begin(); _iter922 != this->tablesUsed.end(); ++_iter922) { - xfer += oprot->writeString((*_iter880)); + xfer += oprot->writeString((*_iter922)); } xfer += oprot->writeSetEnd(); } @@ -21315,15 +22117,15 @@ void swap(Materialization &a, Materialization &b) { swap(a.invalidationTime, b.invalidationTime); } -Materialization::Materialization(const Materialization& other881) { - materializationTable = other881.materializationTable; - tablesUsed = other881.tablesUsed; - invalidationTime = other881.invalidationTime; +Materialization::Materialization(const Materialization& other923) { + materializationTable = other923.materializationTable; + tablesUsed = other923.tablesUsed; + invalidationTime = other923.invalidationTime; } -Materialization& Materialization::operator=(const Materialization& other882) { - materializationTable = other882.materializationTable; - tablesUsed = other882.tablesUsed; - invalidationTime = other882.invalidationTime; +Materialization& Materialization::operator=(const Materialization& other924) { + materializationTable = other924.materializationTable; + tablesUsed = other924.tablesUsed; + invalidationTime = other924.invalidationTime; return *this; } void Materialization::printTo(std::ostream& out) const { @@ -21391,9 +22193,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast883; - xfer += iprot->readI32(ecast883); - this->status = (WMResourcePlanStatus::type)ecast883; + int32_t 
ecast925; + xfer += iprot->readI32(ecast925); + this->status = (WMResourcePlanStatus::type)ecast925; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -21467,19 +22269,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) { swap(a.__isset, b.__isset); } -WMResourcePlan::WMResourcePlan(const WMResourcePlan& other884) { - name = other884.name; - status = other884.status; - queryParallelism = other884.queryParallelism; - defaultPoolPath = other884.defaultPoolPath; - __isset = other884.__isset; +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other926) { + name = other926.name; + status = other926.status; + queryParallelism = other926.queryParallelism; + defaultPoolPath = other926.defaultPoolPath; + __isset = other926.__isset; } -WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other885) { - name = other885.name; - status = other885.status; - queryParallelism = other885.queryParallelism; - defaultPoolPath = other885.defaultPoolPath; - __isset = other885.__isset; +WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other927) { + name = other927.name; + status = other927.status; + queryParallelism = other927.queryParallelism; + defaultPoolPath = other927.defaultPoolPath; + __isset = other927.__isset; return *this; } void WMResourcePlan::printTo(std::ostream& out) const { @@ -21558,9 +22360,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast886; - xfer += iprot->readI32(ecast886); - this->status = (WMResourcePlanStatus::type)ecast886; + int32_t ecast928; + xfer += iprot->readI32(ecast928); + this->status = (WMResourcePlanStatus::type)ecast928; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -21662,23 +22464,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) { swap(a.__isset, b.__isset); } -WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& 
other887) { - name = other887.name; - status = other887.status; - queryParallelism = other887.queryParallelism; - isSetQueryParallelism = other887.isSetQueryParallelism; - defaultPoolPath = other887.defaultPoolPath; - isSetDefaultPoolPath = other887.isSetDefaultPoolPath; - __isset = other887.__isset; -} -WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other888) { - name = other888.name; - status = other888.status; - queryParallelism = other888.queryParallelism; - isSetQueryParallelism = other888.isSetQueryParallelism; - defaultPoolPath = other888.defaultPoolPath; - isSetDefaultPoolPath = other888.isSetDefaultPoolPath; - __isset = other888.__isset; +WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other929) { + name = other929.name; + status = other929.status; + queryParallelism = other929.queryParallelism; + isSetQueryParallelism = other929.isSetQueryParallelism; + defaultPoolPath = other929.defaultPoolPath; + isSetDefaultPoolPath = other929.isSetDefaultPoolPath; + __isset = other929.__isset; +} +WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other930) { + name = other930.name; + status = other930.status; + queryParallelism = other930.queryParallelism; + isSetQueryParallelism = other930.isSetQueryParallelism; + defaultPoolPath = other930.defaultPoolPath; + isSetDefaultPoolPath = other930.isSetDefaultPoolPath; + __isset = other930.__isset; return *this; } void WMNullableResourcePlan::printTo(std::ostream& out) const { @@ -21843,21 +22645,21 @@ void swap(WMPool &a, WMPool &b) { swap(a.__isset, b.__isset); } -WMPool::WMPool(const WMPool& other889) { - resourcePlanName = other889.resourcePlanName; - poolPath = other889.poolPath; - allocFraction = other889.allocFraction; - queryParallelism = other889.queryParallelism; - schedulingPolicy = other889.schedulingPolicy; - __isset = other889.__isset; -} -WMPool& WMPool::operator=(const WMPool& other890) { - 
resourcePlanName = other890.resourcePlanName; - poolPath = other890.poolPath; - allocFraction = other890.allocFraction; - queryParallelism = other890.queryParallelism; - schedulingPolicy = other890.schedulingPolicy; - __isset = other890.__isset; +WMPool::WMPool(const WMPool& other931) { + resourcePlanName = other931.resourcePlanName; + poolPath = other931.poolPath; + allocFraction = other931.allocFraction; + queryParallelism = other931.queryParallelism; + schedulingPolicy = other931.schedulingPolicy; + __isset = other931.__isset; +} +WMPool& WMPool::operator=(const WMPool& other932) { + resourcePlanName = other932.resourcePlanName; + poolPath = other932.poolPath; + allocFraction = other932.allocFraction; + queryParallelism = other932.queryParallelism; + schedulingPolicy = other932.schedulingPolicy; + __isset = other932.__isset; return *this; } void WMPool::printTo(std::ostream& out) const { @@ -22040,23 +22842,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) { swap(a.__isset, b.__isset); } -WMNullablePool::WMNullablePool(const WMNullablePool& other891) { - resourcePlanName = other891.resourcePlanName; - poolPath = other891.poolPath; - allocFraction = other891.allocFraction; - queryParallelism = other891.queryParallelism; - schedulingPolicy = other891.schedulingPolicy; - isSetSchedulingPolicy = other891.isSetSchedulingPolicy; - __isset = other891.__isset; +WMNullablePool::WMNullablePool(const WMNullablePool& other933) { + resourcePlanName = other933.resourcePlanName; + poolPath = other933.poolPath; + allocFraction = other933.allocFraction; + queryParallelism = other933.queryParallelism; + schedulingPolicy = other933.schedulingPolicy; + isSetSchedulingPolicy = other933.isSetSchedulingPolicy; + __isset = other933.__isset; } -WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other892) { - resourcePlanName = other892.resourcePlanName; - poolPath = other892.poolPath; - allocFraction = other892.allocFraction; - queryParallelism = 
other892.queryParallelism; - schedulingPolicy = other892.schedulingPolicy; - isSetSchedulingPolicy = other892.isSetSchedulingPolicy; - __isset = other892.__isset; +WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other934) { + resourcePlanName = other934.resourcePlanName; + poolPath = other934.poolPath; + allocFraction = other934.allocFraction; + queryParallelism = other934.queryParallelism; + schedulingPolicy = other934.schedulingPolicy; + isSetSchedulingPolicy = other934.isSetSchedulingPolicy; + __isset = other934.__isset; return *this; } void WMNullablePool::printTo(std::ostream& out) const { @@ -22221,21 +23023,21 @@ void swap(WMTrigger &a, WMTrigger &b) { swap(a.__isset, b.__isset); } -WMTrigger::WMTrigger(const WMTrigger& other893) { - resourcePlanName = other893.resourcePlanName; - triggerName = other893.triggerName; - triggerExpression = other893.triggerExpression; - actionExpression = other893.actionExpression; - isInUnmanaged = other893.isInUnmanaged; - __isset = other893.__isset; -} -WMTrigger& WMTrigger::operator=(const WMTrigger& other894) { - resourcePlanName = other894.resourcePlanName; - triggerName = other894.triggerName; - triggerExpression = other894.triggerExpression; - actionExpression = other894.actionExpression; - isInUnmanaged = other894.isInUnmanaged; - __isset = other894.__isset; +WMTrigger::WMTrigger(const WMTrigger& other935) { + resourcePlanName = other935.resourcePlanName; + triggerName = other935.triggerName; + triggerExpression = other935.triggerExpression; + actionExpression = other935.actionExpression; + isInUnmanaged = other935.isInUnmanaged; + __isset = other935.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other936) { + resourcePlanName = other936.resourcePlanName; + triggerName = other936.triggerName; + triggerExpression = other936.triggerExpression; + actionExpression = other936.actionExpression; + isInUnmanaged = other936.isInUnmanaged; + __isset = other936.__isset; return *this; } void 
WMTrigger::printTo(std::ostream& out) const { @@ -22400,21 +23202,21 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.__isset, b.__isset); } -WMMapping::WMMapping(const WMMapping& other895) { - resourcePlanName = other895.resourcePlanName; - entityType = other895.entityType; - entityName = other895.entityName; - poolPath = other895.poolPath; - ordering = other895.ordering; - __isset = other895.__isset; -} -WMMapping& WMMapping::operator=(const WMMapping& other896) { - resourcePlanName = other896.resourcePlanName; - entityType = other896.entityType; - entityName = other896.entityName; - poolPath = other896.poolPath; - ordering = other896.ordering; - __isset = other896.__isset; +WMMapping::WMMapping(const WMMapping& other937) { + resourcePlanName = other937.resourcePlanName; + entityType = other937.entityType; + entityName = other937.entityName; + poolPath = other937.poolPath; + ordering = other937.ordering; + __isset = other937.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other938) { + resourcePlanName = other938.resourcePlanName; + entityType = other938.entityType; + entityName = other938.entityName; + poolPath = other938.poolPath; + ordering = other938.ordering; + __isset = other938.__isset; return *this; } void WMMapping::printTo(std::ostream& out) const { @@ -22520,13 +23322,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) { swap(a.trigger, b.trigger); } -WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other897) { - pool = other897.pool; - trigger = other897.trigger; +WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other939) { + pool = other939.pool; + trigger = other939.trigger; } -WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other898) { - pool = other898.pool; - trigger = other898.trigger; +WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other940) { + pool = other940.pool; + trigger = other940.trigger; return *this; } void WMPoolTrigger::printTo(std::ostream& out) const { @@ -22600,14 +23402,14 @@ uint32_t 
WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->pools.clear(); - uint32_t _size899; - ::apache::thrift::protocol::TType _etype902; - xfer += iprot->readListBegin(_etype902, _size899); - this->pools.resize(_size899); - uint32_t _i903; - for (_i903 = 0; _i903 < _size899; ++_i903) + uint32_t _size941; + ::apache::thrift::protocol::TType _etype944; + xfer += iprot->readListBegin(_etype944, _size941); + this->pools.resize(_size941); + uint32_t _i945; + for (_i945 = 0; _i945 < _size941; ++_i945) { - xfer += this->pools[_i903].read(iprot); + xfer += this->pools[_i945].read(iprot); } xfer += iprot->readListEnd(); } @@ -22620,14 +23422,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->mappings.clear(); - uint32_t _size904; - ::apache::thrift::protocol::TType _etype907; - xfer += iprot->readListBegin(_etype907, _size904); - this->mappings.resize(_size904); - uint32_t _i908; - for (_i908 = 0; _i908 < _size904; ++_i908) + uint32_t _size946; + ::apache::thrift::protocol::TType _etype949; + xfer += iprot->readListBegin(_etype949, _size946); + this->mappings.resize(_size946); + uint32_t _i950; + for (_i950 = 0; _i950 < _size946; ++_i950) { - xfer += this->mappings[_i908].read(iprot); + xfer += this->mappings[_i950].read(iprot); } xfer += iprot->readListEnd(); } @@ -22640,14 +23442,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size909; - ::apache::thrift::protocol::TType _etype912; - xfer += iprot->readListBegin(_etype912, _size909); - this->triggers.resize(_size909); - uint32_t _i913; - for (_i913 = 0; _i913 < _size909; ++_i913) + uint32_t _size951; + ::apache::thrift::protocol::TType _etype954; + xfer += iprot->readListBegin(_etype954, _size951); + 
this->triggers.resize(_size951); + uint32_t _i955; + for (_i955 = 0; _i955 < _size951; ++_i955) { - xfer += this->triggers[_i913].read(iprot); + xfer += this->triggers[_i955].read(iprot); } xfer += iprot->readListEnd(); } @@ -22660,14 +23462,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->poolTriggers.clear(); - uint32_t _size914; - ::apache::thrift::protocol::TType _etype917; - xfer += iprot->readListBegin(_etype917, _size914); - this->poolTriggers.resize(_size914); - uint32_t _i918; - for (_i918 = 0; _i918 < _size914; ++_i918) + uint32_t _size956; + ::apache::thrift::protocol::TType _etype959; + xfer += iprot->readListBegin(_etype959, _size956); + this->poolTriggers.resize(_size956); + uint32_t _i960; + for (_i960 = 0; _i960 < _size956; ++_i960) { - xfer += this->poolTriggers[_i918].read(iprot); + xfer += this->poolTriggers[_i960].read(iprot); } xfer += iprot->readListEnd(); } @@ -22704,10 +23506,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->pools.size())); - std::vector ::const_iterator _iter919; - for (_iter919 = this->pools.begin(); _iter919 != this->pools.end(); ++_iter919) + std::vector ::const_iterator _iter961; + for (_iter961 = this->pools.begin(); _iter961 != this->pools.end(); ++_iter961) { - xfer += (*_iter919).write(oprot); + xfer += (*_iter961).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22717,10 +23519,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->mappings.size())); - std::vector ::const_iterator _iter920; - for (_iter920 = 
this->mappings.begin(); _iter920 != this->mappings.end(); ++_iter920) + std::vector ::const_iterator _iter962; + for (_iter962 = this->mappings.begin(); _iter962 != this->mappings.end(); ++_iter962) { - xfer += (*_iter920).write(oprot); + xfer += (*_iter962).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22730,10 +23532,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter921; - for (_iter921 = this->triggers.begin(); _iter921 != this->triggers.end(); ++_iter921) + std::vector ::const_iterator _iter963; + for (_iter963 = this->triggers.begin(); _iter963 != this->triggers.end(); ++_iter963) { - xfer += (*_iter921).write(oprot); + xfer += (*_iter963).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22743,10 +23545,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->poolTriggers.size())); - std::vector ::const_iterator _iter922; - for (_iter922 = this->poolTriggers.begin(); _iter922 != this->poolTriggers.end(); ++_iter922) + std::vector ::const_iterator _iter964; + for (_iter964 = this->poolTriggers.begin(); _iter964 != this->poolTriggers.end(); ++_iter964) { - xfer += (*_iter922).write(oprot); + xfer += (*_iter964).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22767,21 +23569,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) { swap(a.__isset, b.__isset); } -WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other923) { - plan = other923.plan; - pools = other923.pools; - mappings = other923.mappings; - triggers = other923.triggers; - poolTriggers = 
other923.poolTriggers; - __isset = other923.__isset; -} -WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other924) { - plan = other924.plan; - pools = other924.pools; - mappings = other924.mappings; - triggers = other924.triggers; - poolTriggers = other924.poolTriggers; - __isset = other924.__isset; +WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other965) { + plan = other965.plan; + pools = other965.pools; + mappings = other965.mappings; + triggers = other965.triggers; + poolTriggers = other965.poolTriggers; + __isset = other965.__isset; +} +WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other966) { + plan = other966.plan; + pools = other966.pools; + mappings = other966.mappings; + triggers = other966.triggers; + poolTriggers = other966.poolTriggers; + __isset = other966.__isset; return *this; } void WMFullResourcePlan::printTo(std::ostream& out) const { @@ -22886,15 +23688,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other925) { - resourcePlan = other925.resourcePlan; - copyFrom = other925.copyFrom; - __isset = other925.__isset; +WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other967) { + resourcePlan = other967.resourcePlan; + copyFrom = other967.copyFrom; + __isset = other967.__isset; } -WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other926) { - resourcePlan = other926.resourcePlan; - copyFrom = other926.copyFrom; - __isset = other926.__isset; +WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other968) { + resourcePlan = other968.resourcePlan; + copyFrom = other968.copyFrom; + __isset = other968.__isset; return *this; } void WMCreateResourcePlanRequest::printTo(std::ostream& 
out) const { @@ -22954,11 +23756,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) { (void) b; } -WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other927) { - (void) other927; +WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other969) { + (void) other969; } -WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other928) { - (void) other928; +WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other970) { + (void) other970; return *this; } void WMCreateResourcePlanResponse::printTo(std::ostream& out) const { @@ -23016,11 +23818,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b) (void) b; } -WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other929) { - (void) other929; +WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other971) { + (void) other971; } -WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other930) { - (void) other930; +WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other972) { + (void) other972; return *this; } void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const { @@ -23101,13 +23903,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b swap(a.__isset, b.__isset); } -WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other931) { - resourcePlan = other931.resourcePlan; - __isset = other931.__isset; +WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other973) { + resourcePlan = other973.resourcePlan; + __isset 
= other973.__isset; } -WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other932) { - resourcePlan = other932.resourcePlan; - __isset = other932.__isset; +WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other974) { + resourcePlan = other974.resourcePlan; + __isset = other974.__isset; return *this; } void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const { @@ -23189,13 +23991,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other933) { - resourcePlanName = other933.resourcePlanName; - __isset = other933.__isset; +WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other975) { + resourcePlanName = other975.resourcePlanName; + __isset = other975.__isset; } -WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other934) { - resourcePlanName = other934.resourcePlanName; - __isset = other934.__isset; +WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other976) { + resourcePlanName = other976.resourcePlanName; + __isset = other976.__isset; return *this; } void WMGetResourcePlanRequest::printTo(std::ostream& out) const { @@ -23277,13 +24079,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other935) { - resourcePlan = other935.resourcePlan; - __isset = other935.__isset; +WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other977) { + resourcePlan = other977.resourcePlan; + __isset = other977.__isset; } -WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const 
WMGetResourcePlanResponse& other936) { - resourcePlan = other936.resourcePlan; - __isset = other936.__isset; +WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other978) { + resourcePlan = other978.resourcePlan; + __isset = other978.__isset; return *this; } void WMGetResourcePlanResponse::printTo(std::ostream& out) const { @@ -23342,11 +24144,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) { (void) b; } -WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other937) { - (void) other937; +WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other979) { + (void) other979; } -WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other938) { - (void) other938; +WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other980) { + (void) other980; return *this; } void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const { @@ -23390,14 +24192,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourcePlans.clear(); - uint32_t _size939; - ::apache::thrift::protocol::TType _etype942; - xfer += iprot->readListBegin(_etype942, _size939); - this->resourcePlans.resize(_size939); - uint32_t _i943; - for (_i943 = 0; _i943 < _size939; ++_i943) + uint32_t _size981; + ::apache::thrift::protocol::TType _etype984; + xfer += iprot->readListBegin(_etype984, _size981); + this->resourcePlans.resize(_size981); + uint32_t _i985; + for (_i985 = 0; _i985 < _size981; ++_i985) { - xfer += this->resourcePlans[_i943].read(iprot); + xfer += this->resourcePlans[_i985].read(iprot); } xfer += iprot->readListEnd(); } @@ -23427,10 +24229,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc xfer += 
oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourcePlans.size())); - std::vector ::const_iterator _iter944; - for (_iter944 = this->resourcePlans.begin(); _iter944 != this->resourcePlans.end(); ++_iter944) + std::vector ::const_iterator _iter986; + for (_iter986 = this->resourcePlans.begin(); _iter986 != this->resourcePlans.end(); ++_iter986) { - xfer += (*_iter944).write(oprot); + xfer += (*_iter986).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23447,13 +24249,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other945) { - resourcePlans = other945.resourcePlans; - __isset = other945.__isset; +WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other987) { + resourcePlans = other987.resourcePlans; + __isset = other987.__isset; } -WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other946) { - resourcePlans = other946.resourcePlans; - __isset = other946.__isset; +WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other988) { + resourcePlans = other988.resourcePlans; + __isset = other988.__isset; return *this; } void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const { @@ -23611,21 +24413,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other947) { - resourcePlanName = other947.resourcePlanName; - resourcePlan = other947.resourcePlan; - isEnableAndActivate = other947.isEnableAndActivate; - isForceDeactivate = other947.isForceDeactivate; - isReplace = 
other947.isReplace; - __isset = other947.__isset; -} -WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other948) { - resourcePlanName = other948.resourcePlanName; - resourcePlan = other948.resourcePlan; - isEnableAndActivate = other948.isEnableAndActivate; - isForceDeactivate = other948.isForceDeactivate; - isReplace = other948.isReplace; - __isset = other948.__isset; +WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other989) { + resourcePlanName = other989.resourcePlanName; + resourcePlan = other989.resourcePlan; + isEnableAndActivate = other989.isEnableAndActivate; + isForceDeactivate = other989.isForceDeactivate; + isReplace = other989.isReplace; + __isset = other989.__isset; +} +WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other990) { + resourcePlanName = other990.resourcePlanName; + resourcePlan = other990.resourcePlan; + isEnableAndActivate = other990.isEnableAndActivate; + isForceDeactivate = other990.isForceDeactivate; + isReplace = other990.isReplace; + __isset = other990.__isset; return *this; } void WMAlterResourcePlanRequest::printTo(std::ostream& out) const { @@ -23711,13 +24513,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other949) { - fullResourcePlan = other949.fullResourcePlan; - __isset = other949.__isset; +WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other991) { + fullResourcePlan = other991.fullResourcePlan; + __isset = other991.__isset; } -WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other950) { - fullResourcePlan = other950.fullResourcePlan; - __isset = other950.__isset; +WMAlterResourcePlanResponse& 
WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other992) { + fullResourcePlan = other992.fullResourcePlan; + __isset = other992.__isset; return *this; } void WMAlterResourcePlanResponse::printTo(std::ostream& out) const { @@ -23799,13 +24601,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other951) { - resourcePlanName = other951.resourcePlanName; - __isset = other951.__isset; +WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other993) { + resourcePlanName = other993.resourcePlanName; + __isset = other993.__isset; } -WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other952) { - resourcePlanName = other952.resourcePlanName; - __isset = other952.__isset; +WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other994) { + resourcePlanName = other994.resourcePlanName; + __isset = other994.__isset; return *this; } void WMValidateResourcePlanRequest::printTo(std::ostream& out) const { @@ -23855,14 +24657,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->errors.clear(); - uint32_t _size953; - ::apache::thrift::protocol::TType _etype956; - xfer += iprot->readListBegin(_etype956, _size953); - this->errors.resize(_size953); - uint32_t _i957; - for (_i957 = 0; _i957 < _size953; ++_i957) + uint32_t _size995; + ::apache::thrift::protocol::TType _etype998; + xfer += iprot->readListBegin(_etype998, _size995); + this->errors.resize(_size995); + uint32_t _i999; + for (_i999 = 0; _i999 < _size995; ++_i999) { - xfer += iprot->readString(this->errors[_i957]); + xfer += iprot->readString(this->errors[_i999]); } xfer += 
iprot->readListEnd(); } @@ -23875,14 +24677,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->warnings.clear(); - uint32_t _size958; - ::apache::thrift::protocol::TType _etype961; - xfer += iprot->readListBegin(_etype961, _size958); - this->warnings.resize(_size958); - uint32_t _i962; - for (_i962 = 0; _i962 < _size958; ++_i962) + uint32_t _size1000; + ::apache::thrift::protocol::TType _etype1003; + xfer += iprot->readListBegin(_etype1003, _size1000); + this->warnings.resize(_size1000); + uint32_t _i1004; + for (_i1004 = 0; _i1004 < _size1000; ++_i1004) { - xfer += iprot->readString(this->warnings[_i962]); + xfer += iprot->readString(this->warnings[_i1004]); } xfer += iprot->readListEnd(); } @@ -23912,10 +24714,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->errors.size())); - std::vector ::const_iterator _iter963; - for (_iter963 = this->errors.begin(); _iter963 != this->errors.end(); ++_iter963) + std::vector ::const_iterator _iter1005; + for (_iter1005 = this->errors.begin(); _iter1005 != this->errors.end(); ++_iter1005) { - xfer += oprot->writeString((*_iter963)); + xfer += oprot->writeString((*_iter1005)); } xfer += oprot->writeListEnd(); } @@ -23925,10 +24727,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->warnings.size())); - std::vector ::const_iterator _iter964; - for (_iter964 = this->warnings.begin(); _iter964 != this->warnings.end(); ++_iter964) + std::vector ::const_iterator _iter1006; + for (_iter1006 = this->warnings.begin(); _iter1006 != 
this->warnings.end(); ++_iter1006) { - xfer += oprot->writeString((*_iter964)); + xfer += oprot->writeString((*_iter1006)); } xfer += oprot->writeListEnd(); } @@ -23946,15 +24748,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b) swap(a.__isset, b.__isset); } -WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other965) { - errors = other965.errors; - warnings = other965.warnings; - __isset = other965.__isset; +WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1007) { + errors = other1007.errors; + warnings = other1007.warnings; + __isset = other1007.__isset; } -WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other966) { - errors = other966.errors; - warnings = other966.warnings; - __isset = other966.__isset; +WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1008) { + errors = other1008.errors; + warnings = other1008.warnings; + __isset = other1008.__isset; return *this; } void WMValidateResourcePlanResponse::printTo(std::ostream& out) const { @@ -24037,13 +24839,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other967) { - resourcePlanName = other967.resourcePlanName; - __isset = other967.__isset; +WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1009) { + resourcePlanName = other1009.resourcePlanName; + __isset = other1009.__isset; } -WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other968) { - resourcePlanName = other968.resourcePlanName; - __isset = other968.__isset; +WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const 
WMDropResourcePlanRequest& other1010) { + resourcePlanName = other1010.resourcePlanName; + __isset = other1010.__isset; return *this; } void WMDropResourcePlanRequest::printTo(std::ostream& out) const { @@ -24102,11 +24904,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) { (void) b; } -WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other969) { - (void) other969; +WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1011) { + (void) other1011; } -WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other970) { - (void) other970; +WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1012) { + (void) other1012; return *this; } void WMDropResourcePlanResponse::printTo(std::ostream& out) const { @@ -24187,13 +24989,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other971) { - trigger = other971.trigger; - __isset = other971.__isset; +WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1013) { + trigger = other1013.trigger; + __isset = other1013.__isset; } -WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other972) { - trigger = other972.trigger; - __isset = other972.__isset; +WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1014) { + trigger = other1014.trigger; + __isset = other1014.__isset; return *this; } void WMCreateTriggerRequest::printTo(std::ostream& out) const { @@ -24252,11 +25054,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) { (void) b; } -WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other973) { - (void) other973; 
+WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1015) { + (void) other1015; } -WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other974) { - (void) other974; +WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1016) { + (void) other1016; return *this; } void WMCreateTriggerResponse::printTo(std::ostream& out) const { @@ -24337,13 +25139,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other975) { - trigger = other975.trigger; - __isset = other975.__isset; +WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1017) { + trigger = other1017.trigger; + __isset = other1017.__isset; } -WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other976) { - trigger = other976.trigger; - __isset = other976.__isset; +WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1018) { + trigger = other1018.trigger; + __isset = other1018.__isset; return *this; } void WMAlterTriggerRequest::printTo(std::ostream& out) const { @@ -24402,11 +25204,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) { (void) b; } -WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other977) { - (void) other977; +WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1019) { + (void) other1019; } -WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other978) { - (void) other978; +WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1020) { + (void) other1020; return *this; } void WMAlterTriggerResponse::printTo(std::ostream& out) const { @@ -24506,15 +25308,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) { 
swap(a.__isset, b.__isset); } -WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other979) { - resourcePlanName = other979.resourcePlanName; - triggerName = other979.triggerName; - __isset = other979.__isset; +WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1021) { + resourcePlanName = other1021.resourcePlanName; + triggerName = other1021.triggerName; + __isset = other1021.__isset; } -WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other980) { - resourcePlanName = other980.resourcePlanName; - triggerName = other980.triggerName; - __isset = other980.__isset; +WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1022) { + resourcePlanName = other1022.resourcePlanName; + triggerName = other1022.triggerName; + __isset = other1022.__isset; return *this; } void WMDropTriggerRequest::printTo(std::ostream& out) const { @@ -24574,11 +25376,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) { (void) b; } -WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other981) { - (void) other981; +WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1023) { + (void) other1023; } -WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other982) { - (void) other982; +WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1024) { + (void) other1024; return *this; } void WMDropTriggerResponse::printTo(std::ostream& out) const { @@ -24659,13 +25461,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other983) { - resourcePlanName = other983.resourcePlanName; - __isset = other983.__isset; +WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const 
WMGetTriggersForResourePlanRequest& other1025) { + resourcePlanName = other1025.resourcePlanName; + __isset = other1025.__isset; } -WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other984) { - resourcePlanName = other984.resourcePlanName; - __isset = other984.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1026) { + resourcePlanName = other1026.resourcePlanName; + __isset = other1026.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -24710,14 +25512,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size985; - ::apache::thrift::protocol::TType _etype988; - xfer += iprot->readListBegin(_etype988, _size985); - this->triggers.resize(_size985); - uint32_t _i989; - for (_i989 = 0; _i989 < _size985; ++_i989) + uint32_t _size1027; + ::apache::thrift::protocol::TType _etype1030; + xfer += iprot->readListBegin(_etype1030, _size1027); + this->triggers.resize(_size1027); + uint32_t _i1031; + for (_i1031 = 0; _i1031 < _size1027; ++_i1031) { - xfer += this->triggers[_i989].read(iprot); + xfer += this->triggers[_i1031].read(iprot); } xfer += iprot->readListEnd(); } @@ -24747,10 +25549,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter990; - for (_iter990 = this->triggers.begin(); _iter990 != this->triggers.end(); ++_iter990) + std::vector ::const_iterator _iter1032; + for (_iter1032 = this->triggers.begin(); _iter1032 != this->triggers.end(); ++_iter1032) { - xfer += 
(*_iter990).write(oprot); + xfer += (*_iter1032).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24767,13 +25569,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other991) { - triggers = other991.triggers; - __isset = other991.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1033) { + triggers = other1033.triggers; + __isset = other1033.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other992) { - triggers = other992.triggers; - __isset = other992.__isset; +WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1034) { + triggers = other1034.triggers; + __isset = other1034.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -24855,13 +25657,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other993) { - pool = other993.pool; - __isset = other993.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1035) { + pool = other1035.pool; + __isset = other1035.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other994) { - pool = other994.pool; - __isset = other994.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1036) { + pool = other1036.pool; + __isset = other1036.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -24920,11 +25722,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } 
-WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other995) { - (void) other995; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1037) { + (void) other1037; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other996) { - (void) other996; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1038) { + (void) other1038; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -25024,15 +25826,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other997) { - pool = other997.pool; - poolPath = other997.poolPath; - __isset = other997.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1039) { + pool = other1039.pool; + poolPath = other1039.poolPath; + __isset = other1039.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other998) { - pool = other998.pool; - poolPath = other998.poolPath; - __isset = other998.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1040) { + pool = other1040.pool; + poolPath = other1040.poolPath; + __isset = other1040.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -25092,11 +25894,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } -WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other999) { - (void) other999; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1041) { + (void) other1041; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1000) { - (void) other1000; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1042) { + (void) other1042; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const 
{ @@ -25196,15 +25998,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1001) { - resourcePlanName = other1001.resourcePlanName; - poolPath = other1001.poolPath; - __isset = other1001.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1043) { + resourcePlanName = other1043.resourcePlanName; + poolPath = other1043.poolPath; + __isset = other1043.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1002) { - resourcePlanName = other1002.resourcePlanName; - poolPath = other1002.poolPath; - __isset = other1002.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1044) { + resourcePlanName = other1044.resourcePlanName; + poolPath = other1044.poolPath; + __isset = other1044.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -25264,11 +26066,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1003) { - (void) other1003; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1045) { + (void) other1045; } -WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1004) { - (void) other1004; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1046) { + (void) other1046; return *this; } void WMDropPoolResponse::printTo(std::ostream& out) const { @@ -25368,15 +26170,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1005) { - mapping = other1005.mapping; - update = other1005.update; - __isset = other1005.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const 
WMCreateOrUpdateMappingRequest& other1047) { + mapping = other1047.mapping; + update = other1047.update; + __isset = other1047.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1006) { - mapping = other1006.mapping; - update = other1006.update; - __isset = other1006.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1048) { + mapping = other1048.mapping; + update = other1048.update; + __isset = other1048.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -25436,11 +26238,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1007) { - (void) other1007; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1049) { + (void) other1049; } -WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1008) { - (void) other1008; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1050) { + (void) other1050; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -25521,13 +26323,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1009) { - mapping = other1009.mapping; - __isset = other1009.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1051) { + mapping = other1051.mapping; + __isset = other1051.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1010) { - mapping = other1010.mapping; - __isset = 
other1010.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1052) { + mapping = other1052.mapping; + __isset = other1052.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -25586,11 +26388,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1011) { - (void) other1011; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1053) { + (void) other1053; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1012) { - (void) other1012; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1054) { + (void) other1054; return *this; } void WMDropMappingResponse::printTo(std::ostream& out) const { @@ -25728,19 +26530,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } -WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1013) { - resourcePlanName = other1013.resourcePlanName; - triggerName = other1013.triggerName; - poolPath = other1013.poolPath; - drop = other1013.drop; - __isset = other1013.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1055) { + resourcePlanName = other1055.resourcePlanName; + triggerName = other1055.triggerName; + poolPath = other1055.poolPath; + drop = other1055.drop; + __isset = other1055.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1014) { - resourcePlanName = other1014.resourcePlanName; - triggerName = other1014.triggerName; - poolPath = other1014.poolPath; - drop = other1014.drop; - __isset = 
other1014.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1056) { + resourcePlanName = other1056.resourcePlanName; + triggerName = other1056.triggerName; + poolPath = other1056.poolPath; + drop = other1056.drop; + __isset = other1056.__isset; return *this; } void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -25802,11 +26604,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } -WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1015) { - (void) other1015; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1057) { + (void) other1057; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1016) { - (void) other1016; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1058) { + (void) other1058; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -25885,13 +26687,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1017) : TException() { - message = other1017.message; - __isset = other1017.__isset; +MetaException::MetaException(const MetaException& other1059) : TException() { + message = other1059.message; + __isset = other1059.__isset; } -MetaException& MetaException::operator=(const MetaException& other1018) { - message = other1018.message; - __isset = other1018.__isset; +MetaException& MetaException::operator=(const MetaException& other1060) { + message = 
other1060.message; + __isset = other1060.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -25982,13 +26784,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1019) : TException() { - message = other1019.message; - __isset = other1019.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1061) : TException() { + message = other1061.message; + __isset = other1061.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1020) { - message = other1020.message; - __isset = other1020.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1062) { + message = other1062.message; + __isset = other1062.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -26079,13 +26881,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1021) : TException() { - message = other1021.message; - __isset = other1021.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1063) : TException() { + message = other1063.message; + __isset = other1063.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1022) { - message = other1022.message; - __isset = other1022.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1064) { + message = other1064.message; + __isset = other1064.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -26176,13 +26978,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1023) : 
TException() { - message = other1023.message; - __isset = other1023.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1065) : TException() { + message = other1065.message; + __isset = other1065.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1024) { - message = other1024.message; - __isset = other1024.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1066) { + message = other1066.message; + __isset = other1066.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -26273,13 +27075,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1025) : TException() { - message = other1025.message; - __isset = other1025.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1067) : TException() { + message = other1067.message; + __isset = other1067.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1026) { - message = other1026.message; - __isset = other1026.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1068) { + message = other1068.message; + __isset = other1068.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -26370,13 +27172,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1027) : TException() { - message = other1027.message; - __isset = other1027.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1069) : TException() { + message = 
other1069.message; + __isset = other1069.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1028) { - message = other1028.message; - __isset = other1028.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1070) { + message = other1070.message; + __isset = other1070.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -26467,13 +27269,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1029) : TException() { - message = other1029.message; - __isset = other1029.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1071) : TException() { + message = other1071.message; + __isset = other1071.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1030) { - message = other1030.message; - __isset = other1030.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1072) { + message = other1072.message; + __isset = other1072.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -26564,13 +27366,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1031) : TException() { - message = other1031.message; - __isset = other1031.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1073) : TException() { + message = other1073.message; + __isset = other1073.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1032) { - message = other1032.message; - __isset = other1032.__isset; +NoSuchObjectException& 
NoSuchObjectException::operator=(const NoSuchObjectException& other1074) { + message = other1074.message; + __isset = other1074.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -26661,13 +27463,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1033) : TException() { - message = other1033.message; - __isset = other1033.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1075) : TException() { + message = other1075.message; + __isset = other1075.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1034) { - message = other1034.message; - __isset = other1034.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1076) { + message = other1076.message; + __isset = other1076.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -26758,13 +27560,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1035) : TException() { - message = other1035.message; - __isset = other1035.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1077) : TException() { + message = other1077.message; + __isset = other1077.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1036) { - message = other1036.message; - __isset = other1036.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1078) { + message = other1078.message; + __isset = other1078.__isset; return 
*this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -26855,13 +27657,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1037) : TException() { - message = other1037.message; - __isset = other1037.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1079) : TException() { + message = other1079.message; + __isset = other1079.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1038) { - message = other1038.message; - __isset = other1038.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1080) { + message = other1080.message; + __isset = other1080.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -26952,13 +27754,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1039) : TException() { - message = other1039.message; - __isset = other1039.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1081) : TException() { + message = other1081.message; + __isset = other1081.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1040) { - message = other1040.message; - __isset = other1040.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1082) { + message = other1082.message; + __isset = other1082.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -27049,13 +27851,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } 
-NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1041) : TException() { - message = other1041.message; - __isset = other1041.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1083) : TException() { + message = other1083.message; + __isset = other1083.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1042) { - message = other1042.message; - __isset = other1042.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1084) { + message = other1084.message; + __isset = other1084.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -27146,13 +27948,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1043) : TException() { - message = other1043.message; - __isset = other1043.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1085) : TException() { + message = other1085.message; + __isset = other1085.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1044) { - message = other1044.message; - __isset = other1044.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1086) { + message = other1086.message; + __isset = other1086.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -27243,13 +28045,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1045) : TException() { - message = other1045.message; - __isset = other1045.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1087) : TException() { + message = other1087.message; + __isset = other1087.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& 
other1046) { - message = other1046.message; - __isset = other1046.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1088) { + message = other1088.message; + __isset = other1088.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -27340,13 +28142,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1047) : TException() { - message = other1047.message; - __isset = other1047.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1089) : TException() { + message = other1089.message; + __isset = other1089.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1048) { - message = other1048.message; - __isset = other1048.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1090) { + message = other1090.message; + __isset = other1090.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 4c09bc8..2fb27d2 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -364,6 +364,18 @@ class AbortTxnsRequest; class CommitTxnRequest; +class GetOpenWriteIdsRequest; + +class OpenWriteIds; + +class GetOpenWriteIdsResponse; + +class AllocateTableWriteIdRequest; + +class TxnToWriteId; + +class AllocateTableWriteIdResponse; + class LockComponent; class LockRequest; @@ -5954,6 +5966,294 @@ inline std::ostream& operator<<(std::ostream& out, const CommitTxnRequest& obj) return out; } + +class GetOpenWriteIdsRequest { + public: + + GetOpenWriteIdsRequest(const GetOpenWriteIdsRequest&); + GetOpenWriteIdsRequest& 
operator=(const GetOpenWriteIdsRequest&); + GetOpenWriteIdsRequest() : validTxnStr() { + } + + virtual ~GetOpenWriteIdsRequest() throw(); + std::vector tableNames; + std::string validTxnStr; + + void __set_tableNames(const std::vector & val); + + void __set_validTxnStr(const std::string& val); + + bool operator == (const GetOpenWriteIdsRequest & rhs) const + { + if (!(tableNames == rhs.tableNames)) + return false; + if (!(validTxnStr == rhs.validTxnStr)) + return false; + return true; + } + bool operator != (const GetOpenWriteIdsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetOpenWriteIdsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetOpenWriteIdsRequest &a, GetOpenWriteIdsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetOpenWriteIdsRequest& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _OpenWriteIds__isset { + _OpenWriteIds__isset() : minWriteId(false) {} + bool minWriteId :1; +} _OpenWriteIds__isset; + +class OpenWriteIds { + public: + + OpenWriteIds(const OpenWriteIds&); + OpenWriteIds& operator=(const OpenWriteIds&); + OpenWriteIds() : tableName(), writeIdHighWaterMark(0), minWriteId(0), abortedBits() { + } + + virtual ~OpenWriteIds() throw(); + std::string tableName; + int64_t writeIdHighWaterMark; + std::vector openWriteIds; + int64_t minWriteId; + std::string abortedBits; + + _OpenWriteIds__isset __isset; + + void __set_tableName(const std::string& val); + + void __set_writeIdHighWaterMark(const int64_t val); + + void __set_openWriteIds(const std::vector & val); + + void __set_minWriteId(const int64_t val); + + void __set_abortedBits(const std::string& val); + + bool operator == (const OpenWriteIds & rhs) const + { + if (!(tableName == rhs.tableName)) + return false; + if (!(writeIdHighWaterMark == 
rhs.writeIdHighWaterMark)) + return false; + if (!(openWriteIds == rhs.openWriteIds)) + return false; + if (__isset.minWriteId != rhs.__isset.minWriteId) + return false; + else if (__isset.minWriteId && !(minWriteId == rhs.minWriteId)) + return false; + if (!(abortedBits == rhs.abortedBits)) + return false; + return true; + } + bool operator != (const OpenWriteIds &rhs) const { + return !(*this == rhs); + } + + bool operator < (const OpenWriteIds & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(OpenWriteIds &a, OpenWriteIds &b); + +inline std::ostream& operator<<(std::ostream& out, const OpenWriteIds& obj) +{ + obj.printTo(out); + return out; +} + + +class GetOpenWriteIdsResponse { + public: + + GetOpenWriteIdsResponse(const GetOpenWriteIdsResponse&); + GetOpenWriteIdsResponse& operator=(const GetOpenWriteIdsResponse&); + GetOpenWriteIdsResponse() { + } + + virtual ~GetOpenWriteIdsResponse() throw(); + std::vector openWriteIds; + + void __set_openWriteIds(const std::vector & val); + + bool operator == (const GetOpenWriteIdsResponse & rhs) const + { + if (!(openWriteIds == rhs.openWriteIds)) + return false; + return true; + } + bool operator != (const GetOpenWriteIdsResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetOpenWriteIdsResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetOpenWriteIdsResponse &a, GetOpenWriteIdsResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const GetOpenWriteIdsResponse& obj) +{ + obj.printTo(out); + return out; +} + + +class AllocateTableWriteIdRequest { + public: + + AllocateTableWriteIdRequest(const AllocateTableWriteIdRequest&); + 
AllocateTableWriteIdRequest& operator=(const AllocateTableWriteIdRequest&); + AllocateTableWriteIdRequest() : dbName(), tableName() { + } + + virtual ~AllocateTableWriteIdRequest() throw(); + std::vector txnIds; + std::string dbName; + std::string tableName; + + void __set_txnIds(const std::vector & val); + + void __set_dbName(const std::string& val); + + void __set_tableName(const std::string& val); + + bool operator == (const AllocateTableWriteIdRequest & rhs) const + { + if (!(txnIds == rhs.txnIds)) + return false; + if (!(dbName == rhs.dbName)) + return false; + if (!(tableName == rhs.tableName)) + return false; + return true; + } + bool operator != (const AllocateTableWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AllocateTableWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AllocateTableWriteIdRequest &a, AllocateTableWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class TxnToWriteId { + public: + + TxnToWriteId(const TxnToWriteId&); + TxnToWriteId& operator=(const TxnToWriteId&); + TxnToWriteId() : txnId(0), writeId(0) { + } + + virtual ~TxnToWriteId() throw(); + int64_t txnId; + int64_t writeId; + + void __set_txnId(const int64_t val); + + void __set_writeId(const int64_t val); + + bool operator == (const TxnToWriteId & rhs) const + { + if (!(txnId == rhs.txnId)) + return false; + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const TxnToWriteId &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TxnToWriteId & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void 
printTo(std::ostream& out) const; +}; + +void swap(TxnToWriteId &a, TxnToWriteId &b); + +inline std::ostream& operator<<(std::ostream& out, const TxnToWriteId& obj) +{ + obj.printTo(out); + return out; +} + + +class AllocateTableWriteIdResponse { + public: + + AllocateTableWriteIdResponse(const AllocateTableWriteIdResponse&); + AllocateTableWriteIdResponse& operator=(const AllocateTableWriteIdResponse&); + AllocateTableWriteIdResponse() { + } + + virtual ~AllocateTableWriteIdResponse() throw(); + std::vector txnToWriteIds; + + void __set_txnToWriteIds(const std::vector & val); + + bool operator == (const AllocateTableWriteIdResponse & rhs) const + { + if (!(txnToWriteIds == rhs.txnToWriteIds)) + return false; + return true; + } + bool operator != (const AllocateTableWriteIdResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AllocateTableWriteIdResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AllocateTableWriteIdResponse &a, AllocateTableWriteIdResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdResponse& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _LockComponent__isset { _LockComponent__isset() : tablename(false), partitionname(false), operationType(true), isAcid(true), isDynamicPartitionWrite(true) {} bool tablename :1; diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index dae233a..88d44d5 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -727,13 +727,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 4: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list606 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list606.size); - String _elem607; - for (int _i608 = 0; _i608 < _list606.size; ++_i608) + org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list646.size); + String _elem647; + for (int _i648 = 0; _i648 < _list646.size; ++_i648) { - _elem607 = iprot.readString(); - struct.partitionnames.add(_elem607); + _elem647 = iprot.readString(); + struct.partitionnames.add(_elem647); } iprot.readListEnd(); } @@ -780,9 +780,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter609 : struct.partitionnames) + for (String _iter649 : struct.partitionnames) { - oprot.writeString(_iter609); + oprot.writeString(_iter649); } oprot.writeListEnd(); } @@ -817,9 +817,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter610 : struct.partitionnames) + for (String _iter650 : struct.partitionnames) { - oprot.writeString(_iter610); + oprot.writeString(_iter650); } } BitSet optionals = new BitSet(); @@ -842,13 +842,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list611 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list611.size); - String _elem612; - for (int _i613 = 0; _i613 < _list611.size; ++_i613) + org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list651.size); + String _elem652; + for (int _i653 = 0; _i653 < _list651.size; ++_i653) { - _elem612 = iprot.readString(); - struct.partitionnames.add(_elem612); + _elem652 = iprot.readString(); + struct.partitionnames.add(_elem652); } } struct.setPartitionnamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java new file mode 100644 index 0000000..eb0f595 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java @@ -0,0 +1,640 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import 
java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnIds", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AllocateTableWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AllocateTableWriteIdRequestTupleSchemeFactory()); + } + + private List txnIds; // required + private String dbName; // required + private String tableName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_IDS((short)1, "txnIds"), + DB_NAME((short)2, "dbName"), + TABLE_NAME((short)3, "tableName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_IDS + return TXN_IDS; + case 2: // DB_NAME + return DB_NAME; + case 3: // TABLE_NAME + return TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdRequest.class, metaDataMap); + } + + public AllocateTableWriteIdRequest() { + } + + public AllocateTableWriteIdRequest( + List txnIds, + String dbName, + String tableName) + { + this(); + this.txnIds = txnIds; + this.dbName = dbName; + this.tableName = tableName; + } + + /** + * Performs a deep copy on other. 
+ */ + public AllocateTableWriteIdRequest(AllocateTableWriteIdRequest other) { + if (other.isSetTxnIds()) { + List __this__txnIds = new ArrayList(other.txnIds); + this.txnIds = __this__txnIds; + } + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + } + + public AllocateTableWriteIdRequest deepCopy() { + return new AllocateTableWriteIdRequest(this); + } + + @Override + public void clear() { + this.txnIds = null; + this.dbName = null; + this.tableName = null; + } + + public int getTxnIdsSize() { + return (this.txnIds == null) ? 0 : this.txnIds.size(); + } + + public java.util.Iterator getTxnIdsIterator() { + return (this.txnIds == null) ? null : this.txnIds.iterator(); + } + + public void addToTxnIds(long elem) { + if (this.txnIds == null) { + this.txnIds = new ArrayList(); + } + this.txnIds.add(elem); + } + + public List getTxnIds() { + return this.txnIds; + } + + public void setTxnIds(List txnIds) { + this.txnIds = txnIds; + } + + public void unsetTxnIds() { + this.txnIds = null; + } + + /** Returns true if field txnIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnIds() { + return this.txnIds != null; + } + + public void setTxnIdsIsSet(boolean value) { + if (!value) { + this.txnIds = null; + } + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + 
/** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_IDS: + if (value == null) { + unsetTxnIds(); + } else { + setTxnIds((List)value); + } + break; + + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXN_IDS: + return getTxnIds(); + + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_IDS: + return isSetTxnIds(); + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AllocateTableWriteIdRequest) + return this.equals((AllocateTableWriteIdRequest)that); + return false; + } + + public boolean equals(AllocateTableWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_txnIds = true && this.isSetTxnIds(); + boolean that_present_txnIds = true && that.isSetTxnIds(); + if (this_present_txnIds || that_present_txnIds) { + if (!(this_present_txnIds && that_present_txnIds)) + return false; + if (!this.txnIds.equals(that.txnIds)) + return false; + } + + boolean this_present_dbName = true 
&& this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txnIds = true && (isSetTxnIds()); + list.add(present_txnIds); + if (present_txnIds) + list.add(txnIds); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + return list.hashCode(); + } + + @Override + public int compareTo(AllocateTableWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnIds()).compareTo(other.isSetTxnIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnIds, other.txnIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AllocateTableWriteIdRequest("); + boolean first = true; + + sb.append("txnIds:"); + if (this.txnIds == null) { + sb.append("null"); + } else { + sb.append(this.txnIds); + } + first = false; + if (!first) sb.append(", "); + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTxnIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnIds' is unset! Struct:" + toString()); + } + + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AllocateTableWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdRequestStandardScheme getScheme() { + return new AllocateTableWriteIdRequestStandardScheme(); + } + } + + private static class AllocateTableWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list580 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list580.size); + long _elem581; + for (int _i582 = 0; _i582 < _list580.size; ++_i582) + { + _elem581 = iprot.readI64(); + struct.txnIds.add(_elem581); + } + iprot.readListEnd(); + } + struct.setTxnIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + 
struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.txnIds != null) { + oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); + for (long _iter583 : struct.txnIds) + { + oprot.writeI64(_iter583); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AllocateTableWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdRequestTupleScheme getScheme() { + return new AllocateTableWriteIdRequestTupleScheme(); + } + } + + private static class AllocateTableWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + 
oprot.writeI32(struct.txnIds.size()); + for (long _iter584 : struct.txnIds) + { + oprot.writeI64(_iter584); + } + } + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list585.size); + long _elem586; + for (int _i587 = 0; _i587 < _list585.size; ++_i587) + { + _elem586 = iprot.readI64(); + struct.txnIds.add(_elem586); + } + } + struct.setTxnIdsIsSet(true); + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java new file mode 100644 index 0000000..25f84cc --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import 
org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdResponse"); + + private static final org.apache.thrift.protocol.TField TXN_TO_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnToWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AllocateTableWriteIdResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AllocateTableWriteIdResponseTupleSchemeFactory()); + } + + private List txnToWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_TO_WRITE_IDS((short)1, "txnToWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_TO_WRITE_IDS + return TXN_TO_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_TO_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnToWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TxnToWriteId.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdResponse.class, metaDataMap); + } + + public AllocateTableWriteIdResponse() { + } + + public AllocateTableWriteIdResponse( + List txnToWriteIds) + { + this(); + this.txnToWriteIds = txnToWriteIds; + } + + /** + * Performs a deep copy on other. 
+ */ + public AllocateTableWriteIdResponse(AllocateTableWriteIdResponse other) { + if (other.isSetTxnToWriteIds()) { + List __this__txnToWriteIds = new ArrayList(other.txnToWriteIds.size()); + for (TxnToWriteId other_element : other.txnToWriteIds) { + __this__txnToWriteIds.add(new TxnToWriteId(other_element)); + } + this.txnToWriteIds = __this__txnToWriteIds; + } + } + + public AllocateTableWriteIdResponse deepCopy() { + return new AllocateTableWriteIdResponse(this); + } + + @Override + public void clear() { + this.txnToWriteIds = null; + } + + public int getTxnToWriteIdsSize() { + return (this.txnToWriteIds == null) ? 0 : this.txnToWriteIds.size(); + } + + public java.util.Iterator getTxnToWriteIdsIterator() { + return (this.txnToWriteIds == null) ? null : this.txnToWriteIds.iterator(); + } + + public void addToTxnToWriteIds(TxnToWriteId elem) { + if (this.txnToWriteIds == null) { + this.txnToWriteIds = new ArrayList(); + } + this.txnToWriteIds.add(elem); + } + + public List getTxnToWriteIds() { + return this.txnToWriteIds; + } + + public void setTxnToWriteIds(List txnToWriteIds) { + this.txnToWriteIds = txnToWriteIds; + } + + public void unsetTxnToWriteIds() { + this.txnToWriteIds = null; + } + + /** Returns true if field txnToWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnToWriteIds() { + return this.txnToWriteIds != null; + } + + public void setTxnToWriteIdsIsSet(boolean value) { + if (!value) { + this.txnToWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_TO_WRITE_IDS: + if (value == null) { + unsetTxnToWriteIds(); + } else { + setTxnToWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXN_TO_WRITE_IDS: + return getTxnToWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false 
otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_TO_WRITE_IDS: + return isSetTxnToWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AllocateTableWriteIdResponse) + return this.equals((AllocateTableWriteIdResponse)that); + return false; + } + + public boolean equals(AllocateTableWriteIdResponse that) { + if (that == null) + return false; + + boolean this_present_txnToWriteIds = true && this.isSetTxnToWriteIds(); + boolean that_present_txnToWriteIds = true && that.isSetTxnToWriteIds(); + if (this_present_txnToWriteIds || that_present_txnToWriteIds) { + if (!(this_present_txnToWriteIds && that_present_txnToWriteIds)) + return false; + if (!this.txnToWriteIds.equals(that.txnToWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txnToWriteIds = true && (isSetTxnToWriteIds()); + list.add(present_txnToWriteIds); + if (present_txnToWriteIds) + list.add(txnToWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(AllocateTableWriteIdResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnToWriteIds()).compareTo(other.isSetTxnToWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnToWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnToWriteIds, other.txnToWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AllocateTableWriteIdResponse("); + boolean first = true; + + sb.append("txnToWriteIds:"); + if (this.txnToWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.txnToWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTxnToWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnToWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AllocateTableWriteIdResponseStandardSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdResponseStandardScheme getScheme() { + return new AllocateTableWriteIdResponseStandardScheme(); + } + } + + private static class AllocateTableWriteIdResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + 
org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_TO_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list588 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list588.size); + TxnToWriteId _elem589; + for (int _i590 = 0; _i590 < _list588.size; ++_i590) + { + _elem589 = new TxnToWriteId(); + _elem589.read(iprot); + struct.txnToWriteIds.add(_elem589); + } + iprot.readListEnd(); + } + struct.setTxnToWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.txnToWriteIds != null) { + oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); + for (TxnToWriteId _iter591 : struct.txnToWriteIds) + { + _iter591.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AllocateTableWriteIdResponseTupleSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdResponseTupleScheme getScheme() { + return new AllocateTableWriteIdResponseTupleScheme(); + } + } + + private static class AllocateTableWriteIdResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol 
prot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.txnToWriteIds.size()); + for (TxnToWriteId _iter592 : struct.txnToWriteIds) + { + _iter592.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list593.size); + TxnToWriteId _elem594; + for (int _i595 = 0; _i595 < _list593.size; ++_i595) + { + _elem594 = new TxnToWriteId(); + _elem594.read(iprot); + struct.txnToWriteIds.add(_elem594); + } + } + struct.setTxnToWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index 16e85cf..ee9841f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list706.size); - long _elem707; - for (int _i708 = 0; _i708 < _list706.size; ++_i708) + org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list746.size); + long _elem747; + for (int _i748 = 0; _i748 < _list746.size; 
++_i748) { - _elem707 = iprot.readI64(); - struct.fileIds.add(_elem707); + _elem747 = iprot.readI64(); + struct.fileIds.add(_elem747); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter709 : struct.fileIds) + for (long _iter749 : struct.fileIds) { - oprot.writeI64(_iter709); + oprot.writeI64(_iter749); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter710 : struct.fileIds) + for (long _iter750 : struct.fileIds) { - oprot.writeI64(_iter710); + oprot.writeI64(_iter750); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list711.size); - long _elem712; - for (int _i713 = 0; _i713 < _list711.size; ++_i713) + org.apache.thrift.protocol.TList _list751 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list751.size); + long _elem752; + for (int _i753 = 0; _i753 < _list751.size; ++_i753) { - _elem712 = iprot.readI64(); - struct.fileIds.add(_elem712); + _elem752 = iprot.readI64(); + struct.fileIds.add(_elem752); } } struct.setFileIdsIsSet(true); diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index 816b61b..8dbe4c1 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); - struct.values = new ArrayList(_list722.size); - ClientCapability _elem723; - for (int _i724 = 0; _i724 < _list722.size; ++_i724) + org.apache.thrift.protocol.TList _list762 = iprot.readListBegin(); + struct.values = new ArrayList(_list762.size); + ClientCapability _elem763; + for (int _i764 = 0; _i764 < _list762.size; ++_i764) { - _elem723 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem723); + _elem763 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem763); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter725 : struct.values) + for (ClientCapability _iter765 : struct.values) { - oprot.writeI32(_iter725.getValue()); + oprot.writeI32(_iter765.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { 
oprot.writeI32(struct.values.size()); - for (ClientCapability _iter726 : struct.values) + for (ClientCapability _iter766 : struct.values) { - oprot.writeI32(_iter726.getValue()); + oprot.writeI32(_iter766.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list727.size); - ClientCapability _elem728; - for (int _i729 = 0; _i729 < _list727.size; ++_i729) + org.apache.thrift.protocol.TList _list767 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list767.size); + ClientCapability _elem768; + for (int _i769 = 0; _i769 < _list767.size; ++_i769) { - _elem728 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem728); + _elem768 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem768); } } struct.setValuesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 6da2b88..1853720 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == 
org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map588 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map588.size); - String _key589; - String _val590; - for (int _i591 = 0; _i591 < _map588.size; ++_i591) + org.apache.thrift.protocol.TMap _map628 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map628.size); + String _key629; + String _val630; + for (int _i631 = 0; _i631 < _map628.size; ++_i631) { - _key589 = iprot.readString(); - _val590 = iprot.readString(); - struct.properties.put(_key589, _val590); + _key629 = iprot.readString(); + _val630 = iprot.readString(); + struct.properties.put(_key629, _val630); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter592 : struct.properties.entrySet()) + for (Map.Entry _iter632 : struct.properties.entrySet()) { - oprot.writeString(_iter592.getKey()); - oprot.writeString(_iter592.getValue()); + oprot.writeString(_iter632.getKey()); + oprot.writeString(_iter632.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter593 : struct.properties.entrySet()) + for (Map.Entry _iter633 : struct.properties.entrySet()) { - oprot.writeString(_iter593.getKey()); - oprot.writeString(_iter593.getValue()); + oprot.writeString(_iter633.getKey()); + oprot.writeString(_iter633.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map594 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map594.size); - String _key595; - String _val596; - for (int _i597 = 0; _i597 < _map594.size; ++_i597) + org.apache.thrift.protocol.TMap _map634 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map634.size); + String _key635; + String _val636; + for (int _i637 = 0; _i637 < _map634.size; ++_i637) { - _key595 = iprot.readString(); - _val596 = iprot.readString(); - struct.properties.put(_key595, _val596); + _key635 = iprot.readString(); + _val636 = iprot.readString(); + struct.properties.put(_key635, _val636); } } struct.setPropertiesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index 74cfce6..717840f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st case 3: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set614 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set614.size); - String _elem615; - for (int _i616 = 0; _i616 < _set614.size; ++_i616) + org.apache.thrift.protocol.TSet _set654 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set654.size); + String _elem655; + for (int _i656 = 0; _i656 < _set654.size; ++_i656) { - _elem615 = iprot.readString(); - struct.tablesUsed.add(_elem615); + _elem655 = 
iprot.readString(); + struct.tablesUsed.add(_elem655); } iprot.readSetEnd(); } @@ -669,9 +669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter617 : struct.tablesUsed) + for (String _iter657 : struct.tablesUsed) { - oprot.writeString(_iter617); + oprot.writeString(_iter657); } oprot.writeSetEnd(); } @@ -705,9 +705,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st oprot.writeString(struct.tblName); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter618 : struct.tablesUsed) + for (String _iter658 : struct.tablesUsed) { - oprot.writeString(_iter618); + oprot.writeString(_iter658); } } BitSet optionals = new BitSet(); @@ -728,13 +728,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata str struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set619 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set619.size); - String _elem620; - for (int _i621 = 0; _i621 < _set619.size; ++_i621) + org.apache.thrift.protocol.TSet _set659 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set659.size); + String _elem660; + for (int _i661 = 0; _i661 < _set659.size; ++_i661) { - _elem620 = iprot.readString(); - struct.tablesUsed.add(_elem620); + _elem660 = iprot.readString(); + struct.tablesUsed.add(_elem660); } } struct.setTablesUsedIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 7cc201b..8936410 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -713,13 +713,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list646.size); - String _elem647; - for (int _i648 = 0; _i648 < _list646.size; ++_i648) + org.apache.thrift.protocol.TList _list686 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list686.size); + String _elem687; + for (int _i688 = 0; _i688 < _list686.size; ++_i688) { - _elem647 = iprot.readString(); - struct.partitionVals.add(_elem647); + _elem687 = iprot.readString(); + struct.partitionVals.add(_elem687); } iprot.readListEnd(); } @@ -768,9 +768,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter649 : struct.partitionVals) + for (String _iter689 : struct.partitionVals) { - oprot.writeString(_iter649); + oprot.writeString(_iter689); } oprot.writeListEnd(); } @@ -816,9 +816,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter650 : struct.partitionVals) + for (String _iter690 : struct.partitionVals) { - oprot.writeString(_iter650); + oprot.writeString(_iter690); } } } @@ -843,13 +843,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list651.size); - String _elem652; - for (int _i653 = 0; _i653 < _list651.size; ++_i653) + org.apache.thrift.protocol.TList _list691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list691.size); + String _elem692; + for (int _i693 = 0; _i693 < _list691.size; ++_i693) { - _elem652 = iprot.readString(); - struct.partitionVals.add(_elem652); + _elem692 = iprot.readString(); + struct.partitionVals.add(_elem692); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 2b4883d..ba29e90 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); - struct.functions = new ArrayList(_list714.size); - Function _elem715; - for (int _i716 = 0; _i716 < _list714.size; ++_i716) + org.apache.thrift.protocol.TList _list754 = iprot.readListBegin(); + struct.functions = new ArrayList(_list754.size); + Function _elem755; + for (int _i756 = 0; _i756 < _list754.size; ++_i756) { - _elem715 = new Function(); - _elem715.read(iprot); - 
struct.functions.add(_elem715); + _elem755 = new Function(); + _elem755.read(iprot); + struct.functions.add(_elem755); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter717 : struct.functions) + for (Function _iter757 : struct.functions) { - _iter717.write(oprot); + _iter757.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter718 : struct.functions) + for (Function _iter758 : struct.functions) { - _iter718.write(oprot); + _iter758.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list719.size); - Function _elem720; - for (int _i721 = 0; _i721 < _list719.size; ++_i721) + org.apache.thrift.protocol.TList _list759 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list759.size); + Function _elem760; + for (int _i761 = 0; _i761 < _list759.size; ++_i761) { - _elem720 = new Function(); - _elem720.read(iprot); - struct.functions.add(_elem720); + _elem760 = new Function(); + _elem760.read(iprot); + struct.functions.add(_elem760); } } struct.setFunctionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 5a371c4..62b0768 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list664 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list664.size); - long _elem665; - for (int _i666 = 0; _i666 < _list664.size; ++_i666) + org.apache.thrift.protocol.TList _list704 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list704.size); + long _elem705; + for (int _i706 = 0; _i706 < _list704.size; ++_i706) { - _elem665 = iprot.readI64(); - struct.fileIds.add(_elem665); + _elem705 = iprot.readI64(); + struct.fileIds.add(_elem705); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter667 : struct.fileIds) + for (long _iter707 : struct.fileIds) { - oprot.writeI64(_iter667); + oprot.writeI64(_iter707); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter668 : struct.fileIds) + for (long _iter708 : struct.fileIds) { - oprot.writeI64(_iter668); + oprot.writeI64(_iter708); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list669 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list669.size); - long _elem670; - for (int _i671 = 0; _i671 < _list669.size; ++_i671) + org.apache.thrift.protocol.TList _list709 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list709.size); + long _elem710; + for (int _i711 = 0; _i711 < _list709.size; ++_i711) { - _elem670 = iprot.readI64(); - struct.fileIds.add(_elem670); + _elem710 = iprot.readI64(); + struct.fileIds.add(_elem710); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 6eb6eee..881803f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map654 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map654.size); - long _key655; - MetadataPpdResult _val656; - for (int _i657 = 0; _i657 < _map654.size; ++_i657) + org.apache.thrift.protocol.TMap _map694 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map694.size); + long _key695; + MetadataPpdResult _val696; + for (int _i697 = 0; 
_i697 < _map694.size; ++_i697) { - _key655 = iprot.readI64(); - _val656 = new MetadataPpdResult(); - _val656.read(iprot); - struct.metadata.put(_key655, _val656); + _key695 = iprot.readI64(); + _val696 = new MetadataPpdResult(); + _val696.read(iprot); + struct.metadata.put(_key695, _val696); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter658 : struct.metadata.entrySet()) + for (Map.Entry _iter698 : struct.metadata.entrySet()) { - oprot.writeI64(_iter658.getKey()); - _iter658.getValue().write(oprot); + oprot.writeI64(_iter698.getKey()); + _iter698.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter659 : struct.metadata.entrySet()) + for (Map.Entry _iter699 : struct.metadata.entrySet()) { - oprot.writeI64(_iter659.getKey()); - _iter659.getValue().write(oprot); + oprot.writeI64(_iter699.getKey()); + _iter699.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map660 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map660.size); - long _key661; - MetadataPpdResult _val662; - for (int _i663 = 0; _i663 < 
_map660.size; ++_i663) + org.apache.thrift.protocol.TMap _map700 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map700.size); + long _key701; + MetadataPpdResult _val702; + for (int _i703 = 0; _i703 < _map700.size; ++_i703) { - _key661 = iprot.readI64(); - _val662 = new MetadataPpdResult(); - _val662.read(iprot); - struct.metadata.put(_key661, _val662); + _key701 = iprot.readI64(); + _val702 = new MetadataPpdResult(); + _val702.read(iprot); + struct.metadata.put(_key701, _val702); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 0404358..a051fb0 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list682.size); - long _elem683; - for (int _i684 = 0; _i684 < _list682.size; ++_i684) + org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list722.size); + long _elem723; + for (int _i724 = 0; _i724 < _list722.size; ++_i724) { - _elem683 = iprot.readI64(); - struct.fileIds.add(_elem683); + _elem723 = iprot.readI64(); + struct.fileIds.add(_elem723); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq 
oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter685 : struct.fileIds) + for (long _iter725 : struct.fileIds) { - oprot.writeI64(_iter685); + oprot.writeI64(_iter725); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter686 : struct.fileIds) + for (long _iter726 : struct.fileIds) { - oprot.writeI64(_iter686); + oprot.writeI64(_iter726); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list687.size); - long _elem688; - for (int _i689 = 0; _i689 < _list687.size; ++_i689) + org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list727.size); + long _elem728; + for (int _i729 = 0; _i729 < _list727.size; ++_i729) { - _elem688 = iprot.readI64(); - struct.fileIds.add(_elem688); + _elem728 = iprot.readI64(); + struct.fileIds.add(_elem728); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 3858890..74ca66a 100644 --- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map672 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map672.size); - long _key673; - ByteBuffer _val674; - for (int _i675 = 0; _i675 < _map672.size; ++_i675) + org.apache.thrift.protocol.TMap _map712 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map712.size); + long _key713; + ByteBuffer _val714; + for (int _i715 = 0; _i715 < _map712.size; ++_i715) { - _key673 = iprot.readI64(); - _val674 = iprot.readBinary(); - struct.metadata.put(_key673, _val674); + _key713 = iprot.readI64(); + _val714 = iprot.readBinary(); + struct.metadata.put(_key713, _val714); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter676 : struct.metadata.entrySet()) + for (Map.Entry _iter716 : struct.metadata.entrySet()) { - oprot.writeI64(_iter676.getKey()); - oprot.writeBinary(_iter676.getValue()); + oprot.writeI64(_iter716.getKey()); + oprot.writeBinary(_iter716.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter677 : struct.metadata.entrySet()) + for (Map.Entry _iter717 : struct.metadata.entrySet()) { - 
oprot.writeI64(_iter677.getKey()); - oprot.writeBinary(_iter677.getValue()); + oprot.writeI64(_iter717.getKey()); + oprot.writeBinary(_iter717.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map678 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map678.size); - long _key679; - ByteBuffer _val680; - for (int _i681 = 0; _i681 < _map678.size; ++_i681) + org.apache.thrift.protocol.TMap _map718 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map718.size); + long _key719; + ByteBuffer _val720; + for (int _i721 = 0; _i721 < _map718.size; ++_i721) { - _key679 = iprot.readI64(); - _val680 = iprot.readBinary(); - struct.metadata.put(_key679, _val680); + _key719 = iprot.readI64(); + _val720 = iprot.readBinary(); + struct.metadata.put(_key719, _val720); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java new file mode 100644 index 0000000..2d81761 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java @@ -0,0 +1,539 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import 
org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetOpenWriteIdsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenWriteIdsRequest"); + + private static final org.apache.thrift.protocol.TField TABLE_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tableNames", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField VALID_TXN_STR_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnStr", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetOpenWriteIdsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new 
GetOpenWriteIdsRequestTupleSchemeFactory()); + } + + private List tableNames; // required + private String validTxnStr; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_NAMES((short)1, "tableNames"), + VALID_TXN_STR((short)2, "validTxnStr"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_NAMES + return TABLE_NAMES; + case 2: // VALID_TXN_STR + return VALID_TXN_STR; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tableNames", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_STR, new org.apache.thrift.meta_data.FieldMetaData("validTxnStr", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenWriteIdsRequest.class, metaDataMap); + } + + public GetOpenWriteIdsRequest() { + } + + public GetOpenWriteIdsRequest( + List tableNames, + String validTxnStr) + { + this(); + this.tableNames = tableNames; + this.validTxnStr = validTxnStr; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetOpenWriteIdsRequest(GetOpenWriteIdsRequest other) { + if (other.isSetTableNames()) { + List __this__tableNames = new ArrayList(other.tableNames); + this.tableNames = __this__tableNames; + } + if (other.isSetValidTxnStr()) { + this.validTxnStr = other.validTxnStr; + } + } + + public GetOpenWriteIdsRequest deepCopy() { + return new GetOpenWriteIdsRequest(this); + } + + @Override + public void clear() { + this.tableNames = null; + this.validTxnStr = null; + } + + public int getTableNamesSize() { + return (this.tableNames == null) ? 0 : this.tableNames.size(); + } + + public java.util.Iterator getTableNamesIterator() { + return (this.tableNames == null) ? null : this.tableNames.iterator(); + } + + public void addToTableNames(String elem) { + if (this.tableNames == null) { + this.tableNames = new ArrayList(); + } + this.tableNames.add(elem); + } + + public List getTableNames() { + return this.tableNames; + } + + public void setTableNames(List tableNames) { + this.tableNames = tableNames; + } + + public void unsetTableNames() { + this.tableNames = null; + } + + /** Returns true if field tableNames is set (has been assigned a value) and false otherwise */ + public boolean isSetTableNames() { + return this.tableNames != null; + } + + public void setTableNamesIsSet(boolean value) { + if (!value) { + this.tableNames = null; + } + } + + public String getValidTxnStr() { + return this.validTxnStr; + } + + public void setValidTxnStr(String validTxnStr) { + this.validTxnStr = validTxnStr; + } + + public void unsetValidTxnStr() { + this.validTxnStr = null; + } + + /** Returns true if field validTxnStr is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnStr() { + return this.validTxnStr != null; + } + + public void setValidTxnStrIsSet(boolean value) { + if (!value) { + this.validTxnStr = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_NAMES: + if (value == null) { + 
unsetTableNames(); + } else { + setTableNames((List)value); + } + break; + + case VALID_TXN_STR: + if (value == null) { + unsetValidTxnStr(); + } else { + setValidTxnStr((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_NAMES: + return getTableNames(); + + case VALID_TXN_STR: + return getValidTxnStr(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_NAMES: + return isSetTableNames(); + case VALID_TXN_STR: + return isSetValidTxnStr(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetOpenWriteIdsRequest) + return this.equals((GetOpenWriteIdsRequest)that); + return false; + } + + public boolean equals(GetOpenWriteIdsRequest that) { + if (that == null) + return false; + + boolean this_present_tableNames = true && this.isSetTableNames(); + boolean that_present_tableNames = true && that.isSetTableNames(); + if (this_present_tableNames || that_present_tableNames) { + if (!(this_present_tableNames && that_present_tableNames)) + return false; + if (!this.tableNames.equals(that.tableNames)) + return false; + } + + boolean this_present_validTxnStr = true && this.isSetValidTxnStr(); + boolean that_present_validTxnStr = true && that.isSetValidTxnStr(); + if (this_present_validTxnStr || that_present_validTxnStr) { + if (!(this_present_validTxnStr && that_present_validTxnStr)) + return false; + if (!this.validTxnStr.equals(that.validTxnStr)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_tableNames = true && (isSetTableNames()); + list.add(present_tableNames); + if 
(present_tableNames) + list.add(tableNames); + + boolean present_validTxnStr = true && (isSetValidTxnStr()); + list.add(present_validTxnStr); + if (present_validTxnStr) + list.add(validTxnStr); + + return list.hashCode(); + } + + @Override + public int compareTo(GetOpenWriteIdsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTableNames()).compareTo(other.isSetTableNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNames, other.tableNames); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidTxnStr()).compareTo(other.isSetValidTxnStr()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnStr()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnStr, other.validTxnStr); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetOpenWriteIdsRequest("); + boolean first = true; + + sb.append("tableNames:"); + if (this.tableNames == null) { + sb.append("null"); + } else { + sb.append(this.tableNames); + } + first = false; + if (!first) sb.append(", "); + sb.append("validTxnStr:"); + if (this.validTxnStr == null) { + sb.append("null"); + } else { + 
sb.append(this.validTxnStr); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTableNames()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableNames' is unset! Struct:" + toString()); + } + + if (!isSetValidTxnStr()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'validTxnStr' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetOpenWriteIdsRequestStandardSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsRequestStandardScheme getScheme() { + return new GetOpenWriteIdsRequestStandardScheme(); + } + } + + private static class GetOpenWriteIdsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list556 = 
iprot.readListBegin(); + struct.tableNames = new ArrayList(_list556.size); + String _elem557; + for (int _i558 = 0; _i558 < _list556.size; ++_i558) + { + _elem557 = iprot.readString(); + struct.tableNames.add(_elem557); + } + iprot.readListEnd(); + } + struct.setTableNamesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // VALID_TXN_STR + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnStr = iprot.readString(); + struct.setValidTxnStrIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableNames != null) { + oprot.writeFieldBegin(TABLE_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tableNames.size())); + for (String _iter559 : struct.tableNames) + { + oprot.writeString(_iter559); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.validTxnStr != null) { + oprot.writeFieldBegin(VALID_TXN_STR_FIELD_DESC); + oprot.writeString(struct.validTxnStr); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetOpenWriteIdsRequestTupleSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsRequestTupleScheme getScheme() { + return new GetOpenWriteIdsRequestTupleScheme(); + } + } + + private static class GetOpenWriteIdsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsRequest struct) 
throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.tableNames.size()); + for (String _iter560 : struct.tableNames) + { + oprot.writeString(_iter560); + } + } + oprot.writeString(struct.validTxnStr); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tableNames = new ArrayList(_list561.size); + String _elem562; + for (int _i563 = 0; _i563 < _list561.size; ++_i563) + { + _elem562 = iprot.readString(); + struct.tableNames.add(_elem562); + } + } + struct.setTableNamesIsSet(true); + struct.validTxnStr = iprot.readString(); + struct.setValidTxnStrIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java new file mode 100644 index 0000000..13eadd0 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; 
+import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetOpenWriteIdsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenWriteIdsResponse"); + + private static final org.apache.thrift.protocol.TField OPEN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("openWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetOpenWriteIdsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetOpenWriteIdsResponseTupleSchemeFactory()); + } + + private List openWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + OPEN_WRITE_IDS((short)1, "openWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // OPEN_WRITE_IDS + return OPEN_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.OPEN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("openWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenWriteIds.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenWriteIdsResponse.class, metaDataMap); + } + + public GetOpenWriteIdsResponse() { + } + + public GetOpenWriteIdsResponse( + List openWriteIds) + { + this(); + this.openWriteIds = openWriteIds; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetOpenWriteIdsResponse(GetOpenWriteIdsResponse other) { + if (other.isSetOpenWriteIds()) { + List __this__openWriteIds = new ArrayList(other.openWriteIds.size()); + for (OpenWriteIds other_element : other.openWriteIds) { + __this__openWriteIds.add(new OpenWriteIds(other_element)); + } + this.openWriteIds = __this__openWriteIds; + } + } + + public GetOpenWriteIdsResponse deepCopy() { + return new GetOpenWriteIdsResponse(this); + } + + @Override + public void clear() { + this.openWriteIds = null; + } + + public int getOpenWriteIdsSize() { + return (this.openWriteIds == null) ? 0 : this.openWriteIds.size(); + } + + public java.util.Iterator getOpenWriteIdsIterator() { + return (this.openWriteIds == null) ? null : this.openWriteIds.iterator(); + } + + public void addToOpenWriteIds(OpenWriteIds elem) { + if (this.openWriteIds == null) { + this.openWriteIds = new ArrayList(); + } + this.openWriteIds.add(elem); + } + + public List getOpenWriteIds() { + return this.openWriteIds; + } + + public void setOpenWriteIds(List openWriteIds) { + this.openWriteIds = openWriteIds; + } + + public void unsetOpenWriteIds() { + this.openWriteIds = null; + } + + /** Returns true if field openWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetOpenWriteIds() { + return this.openWriteIds != null; + } + + public void setOpenWriteIdsIsSet(boolean value) { + if (!value) { + this.openWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case OPEN_WRITE_IDS: + if (value == null) { + unsetOpenWriteIds(); + } else { + setOpenWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case OPEN_WRITE_IDS: + return getOpenWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field 
== null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case OPEN_WRITE_IDS: + return isSetOpenWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetOpenWriteIdsResponse) + return this.equals((GetOpenWriteIdsResponse)that); + return false; + } + + public boolean equals(GetOpenWriteIdsResponse that) { + if (that == null) + return false; + + boolean this_present_openWriteIds = true && this.isSetOpenWriteIds(); + boolean that_present_openWriteIds = true && that.isSetOpenWriteIds(); + if (this_present_openWriteIds || that_present_openWriteIds) { + if (!(this_present_openWriteIds && that_present_openWriteIds)) + return false; + if (!this.openWriteIds.equals(that.openWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_openWriteIds = true && (isSetOpenWriteIds()); + list.add(present_openWriteIds); + if (present_openWriteIds) + list.add(openWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetOpenWriteIdsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetOpenWriteIds()).compareTo(other.isSetOpenWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOpenWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.openWriteIds, other.openWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetOpenWriteIdsResponse("); + boolean first = true; + + sb.append("openWriteIds:"); + if (this.openWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.openWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetOpenWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'openWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetOpenWriteIdsResponseStandardSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsResponseStandardScheme getScheme() { + return new GetOpenWriteIdsResponseStandardScheme(); + } + } + + private static class GetOpenWriteIdsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if 
(schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // OPEN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); + struct.openWriteIds = new ArrayList(_list572.size); + OpenWriteIds _elem573; + for (int _i574 = 0; _i574 < _list572.size; ++_i574) + { + _elem573 = new OpenWriteIds(); + _elem573.read(iprot); + struct.openWriteIds.add(_elem573); + } + iprot.readListEnd(); + } + struct.setOpenWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.openWriteIds != null) { + oprot.writeFieldBegin(OPEN_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.openWriteIds.size())); + for (OpenWriteIds _iter575 : struct.openWriteIds) + { + _iter575.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetOpenWriteIdsResponseTupleSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsResponseTupleScheme getScheme() { + return new GetOpenWriteIdsResponseTupleScheme(); + } + } + + private static class GetOpenWriteIdsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.openWriteIds.size()); 
+ for (OpenWriteIds _iter576 : struct.openWriteIds) + { + _iter576.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.openWriteIds = new ArrayList(_list577.size); + OpenWriteIds _elem578; + for (int _i579 = 0; _i579 < _list577.size; ++_i579) + { + _elem578 = new OpenWriteIds(); + _elem578.read(iprot); + struct.openWriteIds.add(_elem578); + } + } + struct.setOpenWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 680ce86..84af22f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -525,13 +525,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list730.size); - String _elem731; - for (int _i732 = 0; _i732 < _list730.size; ++_i732) + org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list770.size); + String _elem771; + for (int _i772 = 0; _i772 < _list770.size; ++_i772) { - _elem731 = iprot.readString(); - struct.tblNames.add(_elem731); + _elem771 = iprot.readString(); + struct.tblNames.add(_elem771); } iprot.readListEnd(); } @@ -572,9 +572,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter733 : struct.tblNames) + for (String _iter773 : struct.tblNames) { - oprot.writeString(_iter733); + oprot.writeString(_iter773); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter734 : struct.tblNames) + for (String _iter774 : struct.tblNames) { - oprot.writeString(_iter734); + oprot.writeString(_iter774); } } } @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list735.size); - String _elem736; - for (int _i737 = 0; _i737 < _list735.size; ++_i737) + org.apache.thrift.protocol.TList _list775 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list775.size); + String _elem776; + for (int _i777 = 0; _i777 < _list775.size; ++_i777) { - _elem736 = iprot.readString(); - struct.tblNames.add(_elem736); + _elem776 = iprot.readString(); + struct.tblNames.add(_elem776); } } struct.setTblNamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index ccd85c4..4aba1d2 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); - struct.tables = new ArrayList
(_list738.size); - Table _elem739; - for (int _i740 = 0; _i740 < _list738.size; ++_i740) + org.apache.thrift.protocol.TList _list778 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list778.size); + Table _elem779; + for (int _i780 = 0; _i780 < _list778.size; ++_i780) { - _elem739 = new Table(); - _elem739.read(iprot); - struct.tables.add(_elem739); + _elem779 = new Table(); + _elem779.read(iprot); + struct.tables.add(_elem779); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter741 : struct.tables) + for (Table _iter781 : struct.tables) { - _iter741.write(oprot); + _iter781.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter742 : struct.tables) + for (Table _iter782 : struct.tables) { - _iter742.write(oprot); + _iter782.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
(_list743.size); - Table _elem744; - for (int _i745 = 0; _i745 < _list743.size; ++_i745) + org.apache.thrift.protocol.TList _list783 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList
(_list783.size); + Table _elem784; + for (int _i785 = 0; _i785 < _list783.size; ++_i785) { - _elem744 = new Table(); - _elem744.read(iprot); - struct.tables.add(_elem744); + _elem784 = new Table(); + _elem784.read(iprot); + struct.tables.add(_elem784); } } struct.setTablesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 762f465..0bcd837 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set572 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set572.size); - long _elem573; - for (int _i574 = 0; _i574 < _set572.size; ++_i574) + org.apache.thrift.protocol.TSet _set612 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set612.size); + long _elem613; + for (int _i614 = 0; _i614 < _set612.size; ++_i614) { - _elem573 = iprot.readI64(); - struct.aborted.add(_elem573); + _elem613 = iprot.readI64(); + struct.aborted.add(_elem613); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set575 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set575.size); - long _elem576; - for (int _i577 = 0; _i577 < _set575.size; ++_i577) + org.apache.thrift.protocol.TSet _set615 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set615.size); 
+ long _elem616; + for (int _i617 = 0; _i617 < _set615.size; ++_i617) { - _elem576 = iprot.readI64(); - struct.nosuch.add(_elem576); + _elem616 = iprot.readI64(); + struct.nosuch.add(_elem616); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter578 : struct.aborted) + for (long _iter618 : struct.aborted) { - oprot.writeI64(_iter578); + oprot.writeI64(_iter618); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter579 : struct.nosuch) + for (long _iter619 : struct.nosuch) { - oprot.writeI64(_iter579); + oprot.writeI64(_iter619); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter580 : struct.aborted) + for (long _iter620 : struct.aborted) { - oprot.writeI64(_iter580); + oprot.writeI64(_iter620); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter581 : struct.nosuch) + for (long _iter621 : struct.nosuch) { - oprot.writeI64(_iter581); + oprot.writeI64(_iter621); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set582 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - 
struct.aborted = new HashSet(2*_set582.size); - long _elem583; - for (int _i584 = 0; _i584 < _set582.size; ++_i584) + org.apache.thrift.protocol.TSet _set622 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set622.size); + long _elem623; + for (int _i624 = 0; _i624 < _set622.size; ++_i624) { - _elem583 = iprot.readI64(); - struct.aborted.add(_elem583); + _elem623 = iprot.readI64(); + struct.aborted.add(_elem623); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set585 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set585.size); - long _elem586; - for (int _i587 = 0; _i587 < _set585.size; ++_i587) + org.apache.thrift.protocol.TSet _set625 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set625.size); + long _elem626; + for (int _i627 = 0; _i627 < _set625.size; ++_i627) { - _elem586 = iprot.readI64(); - struct.nosuch.add(_elem586); + _elem626 = iprot.readI64(); + struct.nosuch.add(_elem626); } } struct.setNosuchIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index e23bc04..85272dd 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -538,13 +538,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list630 = iprot.readListBegin(); - struct.filesAdded = new 
ArrayList(_list630.size); - String _elem631; - for (int _i632 = 0; _i632 < _list630.size; ++_i632) + org.apache.thrift.protocol.TList _list670 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list670.size); + String _elem671; + for (int _i672 = 0; _i672 < _list670.size; ++_i672) { - _elem631 = iprot.readString(); - struct.filesAdded.add(_elem631); + _elem671 = iprot.readString(); + struct.filesAdded.add(_elem671); } iprot.readListEnd(); } @@ -556,13 +556,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list633 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list633.size); - String _elem634; - for (int _i635 = 0; _i635 < _list633.size; ++_i635) + org.apache.thrift.protocol.TList _list673 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list673.size); + String _elem674; + for (int _i675 = 0; _i675 < _list673.size; ++_i675) { - _elem634 = iprot.readString(); - struct.filesAddedChecksum.add(_elem634); + _elem674 = iprot.readString(); + struct.filesAddedChecksum.add(_elem674); } iprot.readListEnd(); } @@ -593,9 +593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter636 : struct.filesAdded) + for (String _iter676 : struct.filesAdded) { - oprot.writeString(_iter636); + oprot.writeString(_iter676); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for 
(String _iter637 : struct.filesAddedChecksum) + for (String _iter677 : struct.filesAddedChecksum) { - oprot.writeString(_iter637); + oprot.writeString(_iter677); } oprot.writeListEnd(); } @@ -634,9 +634,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter638 : struct.filesAdded) + for (String _iter678 : struct.filesAdded) { - oprot.writeString(_iter638); + oprot.writeString(_iter678); } } BitSet optionals = new BitSet(); @@ -653,9 +653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter639 : struct.filesAddedChecksum) + for (String _iter679 : struct.filesAddedChecksum) { - oprot.writeString(_iter639); + oprot.writeString(_iter679); } } } @@ -665,13 +665,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list640 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list640.size); - String _elem641; - for (int _i642 = 0; _i642 < _list640.size; ++_i642) + org.apache.thrift.protocol.TList _list680 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list680.size); + String _elem681; + for (int _i682 = 0; _i682 < _list680.size; ++_i682) { - _elem641 = iprot.readString(); - struct.filesAdded.add(_elem641); + _elem681 = iprot.readString(); + struct.filesAdded.add(_elem681); } } struct.setFilesAddedIsSet(true); @@ -682,13 +682,13 @@ public void read(org.apache.thrift.protocol.TProtocol 
prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list643.size); - String _elem644; - for (int _i645 = 0; _i645 < _list643.size; ++_i645) + org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list683.size); + String _elem684; + for (int _i685 = 0; _i685 < _list683.size; ++_i685) { - _elem644 = iprot.readString(); - struct.filesAddedChecksum.add(_elem644); + _elem684 = iprot.readString(); + struct.filesAddedChecksum.add(_elem684); } } struct.setFilesAddedChecksumIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index 6aaed5c..cfdd0bd 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); - struct.component = new ArrayList(_list556.size); - LockComponent _elem557; - for (int _i558 = 0; _i558 < _list556.size; ++_i558) + org.apache.thrift.protocol.TList _list596 = iprot.readListBegin(); + struct.component = new ArrayList(_list596.size); + LockComponent _elem597; + for (int _i598 = 0; _i598 < _list596.size; ++_i598) { - _elem557 = new LockComponent(); - _elem557.read(iprot); - struct.component.add(_elem557); + _elem597 = new LockComponent(); + 
_elem597.read(iprot); + struct.component.add(_elem597); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter559 : struct.component) + for (LockComponent _iter599 : struct.component) { - _iter559.write(oprot); + _iter599.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter560 : struct.component) + for (LockComponent _iter600 : struct.component) { - _iter560.write(oprot); + _iter600.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list561.size); - LockComponent _elem562; - for (int _i563 = 0; _i563 < _list561.size; ++_i563) + org.apache.thrift.protocol.TList _list601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list601.size); + LockComponent _elem602; + for (int _i603 = 0; _i603 < _list601.size; ++_i603) { - _elem562 = new LockComponent(); - _elem562.read(iprot); - struct.component.add(_elem562); + _elem602 = new LockComponent(); + _elem602.read(iprot); + struct.component.add(_elem602); } } struct.setComponentIsSet(true); diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java index b399d66..b08a436 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java @@ -533,13 +533,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization str case 2: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set746 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set746.size); - String _elem747; - for (int _i748 = 0; _i748 < _set746.size; ++_i748) + org.apache.thrift.protocol.TSet _set786 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set786.size); + String _elem787; + for (int _i788 = 0; _i788 < _set786.size; ++_i788) { - _elem747 = iprot.readString(); - struct.tablesUsed.add(_elem747); + _elem787 = iprot.readString(); + struct.tablesUsed.add(_elem787); } iprot.readSetEnd(); } @@ -578,9 +578,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter749 : struct.tablesUsed) + for (String _iter789 : struct.tablesUsed) { - oprot.writeString(_iter749); + oprot.writeString(_iter789); } oprot.writeSetEnd(); } @@ -609,9 +609,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str struct.materializationTable.write(oprot); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter750 : struct.tablesUsed) + for (String _iter790 : struct.tablesUsed) { - oprot.writeString(_iter750); + oprot.writeString(_iter790); } 
} oprot.writeI64(struct.invalidationTime); @@ -624,13 +624,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Materialization stru struct.materializationTable.read(iprot); struct.setMaterializationTableIsSet(true); { - org.apache.thrift.protocol.TSet _set751 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set751.size); - String _elem752; - for (int _i753 = 0; _i753 < _set751.size; ++_i753) + org.apache.thrift.protocol.TSet _set791 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set791.size); + String _elem792; + for (int _i793 = 0; _i793 < _set791.size; ++_i793) { - _elem752 = iprot.readString(); - struct.tablesUsed.add(_elem752); + _elem792 = iprot.readString(); + struct.tablesUsed.add(_elem792); } } struct.setTablesUsedIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index a28350b..549c14b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list622 = iprot.readListBegin(); - struct.events = new ArrayList(_list622.size); - NotificationEvent _elem623; - for (int _i624 = 0; _i624 < _list622.size; ++_i624) + org.apache.thrift.protocol.TList _list662 = iprot.readListBegin(); + struct.events = new ArrayList(_list662.size); + NotificationEvent _elem663; + for 
(int _i664 = 0; _i664 < _list662.size; ++_i664) { - _elem623 = new NotificationEvent(); - _elem623.read(iprot); - struct.events.add(_elem623); + _elem663 = new NotificationEvent(); + _elem663.read(iprot); + struct.events.add(_elem663); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter625 : struct.events) + for (NotificationEvent _iter665 : struct.events) { - _iter625.write(oprot); + _iter665.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter626 : struct.events) + for (NotificationEvent _iter666 : struct.events) { - _iter626.write(oprot); + _iter666.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list627.size); - NotificationEvent _elem628; - for (int _i629 = 0; _i629 < _list627.size; ++_i629) + org.apache.thrift.protocol.TList _list667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list667.size); + NotificationEvent _elem668; + for (int _i669 = 0; _i669 < _list667.size; ++_i669) { - _elem628 = new NotificationEvent(); - _elem628.read(iprot); - struct.events.add(_elem628); + _elem668 = new 
NotificationEvent(); + _elem668.read(iprot); + struct.events.add(_elem668); } } struct.setEventsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java new file mode 100644 index 0000000..8622430 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java @@ -0,0 +1,851 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class OpenWriteIds implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenWriteIds"); + + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("writeIdHighWaterMark", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField OPEN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("openWriteIds", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField MIN_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("minWriteId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField ABORTED_BITS_FIELD_DESC = new org.apache.thrift.protocol.TField("abortedBits", org.apache.thrift.protocol.TType.STRING, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new OpenWriteIdsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new OpenWriteIdsTupleSchemeFactory()); + } + + private String tableName; // required + private long writeIdHighWaterMark; // required + private List openWriteIds; // required + private long minWriteId; // optional + private ByteBuffer abortedBits; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_NAME((short)1, "tableName"), + WRITE_ID_HIGH_WATER_MARK((short)2, "writeIdHighWaterMark"), + OPEN_WRITE_IDS((short)3, "openWriteIds"), + MIN_WRITE_ID((short)4, "minWriteId"), + ABORTED_BITS((short)5, "abortedBits"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_NAME + return TABLE_NAME; + case 2: // WRITE_ID_HIGH_WATER_MARK + return WRITE_ID_HIGH_WATER_MARK; + case 3: // OPEN_WRITE_IDS + return OPEN_WRITE_IDS; + case 4: // MIN_WRITE_ID + return MIN_WRITE_ID; + case 5: // ABORTED_BITS + return ABORTED_BITS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEIDHIGHWATERMARK_ISSET_ID = 0; + private static final int __MINWRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.MIN_WRITE_ID}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID_HIGH_WATER_MARK, new org.apache.thrift.meta_data.FieldMetaData("writeIdHighWaterMark", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.OPEN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("openWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.MIN_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("minWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ABORTED_BITS, new 
org.apache.thrift.meta_data.FieldMetaData("abortedBits", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenWriteIds.class, metaDataMap); + } + + public OpenWriteIds() { + } + + public OpenWriteIds( + String tableName, + long writeIdHighWaterMark, + List openWriteIds, + ByteBuffer abortedBits) + { + this(); + this.tableName = tableName; + this.writeIdHighWaterMark = writeIdHighWaterMark; + setWriteIdHighWaterMarkIsSet(true); + this.openWriteIds = openWriteIds; + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + /** + * Performs a deep copy on other. + */ + public OpenWriteIds(OpenWriteIds other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + this.writeIdHighWaterMark = other.writeIdHighWaterMark; + if (other.isSetOpenWriteIds()) { + List __this__openWriteIds = new ArrayList(other.openWriteIds); + this.openWriteIds = __this__openWriteIds; + } + this.minWriteId = other.minWriteId; + if (other.isSetAbortedBits()) { + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(other.abortedBits); + } + } + + public OpenWriteIds deepCopy() { + return new OpenWriteIds(this); + } + + @Override + public void clear() { + this.tableName = null; + setWriteIdHighWaterMarkIsSet(false); + this.writeIdHighWaterMark = 0; + this.openWriteIds = null; + setMinWriteIdIsSet(false); + this.minWriteId = 0; + this.abortedBits = null; + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean 
isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public long getWriteIdHighWaterMark() { + return this.writeIdHighWaterMark; + } + + public void setWriteIdHighWaterMark(long writeIdHighWaterMark) { + this.writeIdHighWaterMark = writeIdHighWaterMark; + setWriteIdHighWaterMarkIsSet(true); + } + + public void unsetWriteIdHighWaterMark() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID); + } + + /** Returns true if field writeIdHighWaterMark is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteIdHighWaterMark() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID); + } + + public void setWriteIdHighWaterMarkIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID, value); + } + + public int getOpenWriteIdsSize() { + return (this.openWriteIds == null) ? 0 : this.openWriteIds.size(); + } + + public java.util.Iterator getOpenWriteIdsIterator() { + return (this.openWriteIds == null) ? 
null : this.openWriteIds.iterator(); + } + + public void addToOpenWriteIds(long elem) { + if (this.openWriteIds == null) { + this.openWriteIds = new ArrayList(); + } + this.openWriteIds.add(elem); + } + + public List getOpenWriteIds() { + return this.openWriteIds; + } + + public void setOpenWriteIds(List openWriteIds) { + this.openWriteIds = openWriteIds; + } + + public void unsetOpenWriteIds() { + this.openWriteIds = null; + } + + /** Returns true if field openWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetOpenWriteIds() { + return this.openWriteIds != null; + } + + public void setOpenWriteIdsIsSet(boolean value) { + if (!value) { + this.openWriteIds = null; + } + } + + public long getMinWriteId() { + return this.minWriteId; + } + + public void setMinWriteId(long minWriteId) { + this.minWriteId = minWriteId; + setMinWriteIdIsSet(true); + } + + public void unsetMinWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MINWRITEID_ISSET_ID); + } + + /** Returns true if field minWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMinWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MINWRITEID_ISSET_ID); + } + + public void setMinWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MINWRITEID_ISSET_ID, value); + } + + public byte[] getAbortedBits() { + setAbortedBits(org.apache.thrift.TBaseHelper.rightSize(abortedBits)); + return abortedBits == null ? null : abortedBits.array(); + } + + public ByteBuffer bufferForAbortedBits() { + return org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + public void setAbortedBits(byte[] abortedBits) { + this.abortedBits = abortedBits == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(abortedBits, abortedBits.length)); + } + + public void setAbortedBits(ByteBuffer abortedBits) { + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + public void unsetAbortedBits() { + this.abortedBits = null; + } + + /** Returns true if field abortedBits is set (has been assigned a value) and false otherwise */ + public boolean isSetAbortedBits() { + return this.abortedBits != null; + } + + public void setAbortedBitsIsSet(boolean value) { + if (!value) { + this.abortedBits = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + case WRITE_ID_HIGH_WATER_MARK: + if (value == null) { + unsetWriteIdHighWaterMark(); + } else { + setWriteIdHighWaterMark((Long)value); + } + break; + + case OPEN_WRITE_IDS: + if (value == null) { + unsetOpenWriteIds(); + } else { + setOpenWriteIds((List)value); + } + break; + + case MIN_WRITE_ID: + if (value == null) { + unsetMinWriteId(); + } else { + setMinWriteId((Long)value); + } + break; + + case ABORTED_BITS: + if (value == null) { + unsetAbortedBits(); + } else { + setAbortedBits((ByteBuffer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_NAME: + return getTableName(); + + case WRITE_ID_HIGH_WATER_MARK: + return getWriteIdHighWaterMark(); + + case OPEN_WRITE_IDS: + return getOpenWriteIds(); + + case MIN_WRITE_ID: + return getMinWriteId(); + + case ABORTED_BITS: + return getAbortedBits(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_NAME: + return isSetTableName(); + case 
WRITE_ID_HIGH_WATER_MARK: + return isSetWriteIdHighWaterMark(); + case OPEN_WRITE_IDS: + return isSetOpenWriteIds(); + case MIN_WRITE_ID: + return isSetMinWriteId(); + case ABORTED_BITS: + return isSetAbortedBits(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof OpenWriteIds) + return this.equals((OpenWriteIds)that); + return false; + } + + public boolean equals(OpenWriteIds that) { + if (that == null) + return false; + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_writeIdHighWaterMark = true; + boolean that_present_writeIdHighWaterMark = true; + if (this_present_writeIdHighWaterMark || that_present_writeIdHighWaterMark) { + if (!(this_present_writeIdHighWaterMark && that_present_writeIdHighWaterMark)) + return false; + if (this.writeIdHighWaterMark != that.writeIdHighWaterMark) + return false; + } + + boolean this_present_openWriteIds = true && this.isSetOpenWriteIds(); + boolean that_present_openWriteIds = true && that.isSetOpenWriteIds(); + if (this_present_openWriteIds || that_present_openWriteIds) { + if (!(this_present_openWriteIds && that_present_openWriteIds)) + return false; + if (!this.openWriteIds.equals(that.openWriteIds)) + return false; + } + + boolean this_present_minWriteId = true && this.isSetMinWriteId(); + boolean that_present_minWriteId = true && that.isSetMinWriteId(); + if (this_present_minWriteId || that_present_minWriteId) { + if (!(this_present_minWriteId && that_present_minWriteId)) + return false; + if (this.minWriteId != that.minWriteId) + return false; + } + + boolean this_present_abortedBits = true && 
this.isSetAbortedBits(); + boolean that_present_abortedBits = true && that.isSetAbortedBits(); + if (this_present_abortedBits || that_present_abortedBits) { + if (!(this_present_abortedBits && that_present_abortedBits)) + return false; + if (!this.abortedBits.equals(that.abortedBits)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_writeIdHighWaterMark = true; + list.add(present_writeIdHighWaterMark); + if (present_writeIdHighWaterMark) + list.add(writeIdHighWaterMark); + + boolean present_openWriteIds = true && (isSetOpenWriteIds()); + list.add(present_openWriteIds); + if (present_openWriteIds) + list.add(openWriteIds); + + boolean present_minWriteId = true && (isSetMinWriteId()); + list.add(present_minWriteId); + if (present_minWriteId) + list.add(minWriteId); + + boolean present_abortedBits = true && (isSetAbortedBits()); + list.add(present_abortedBits); + if (present_abortedBits) + list.add(abortedBits); + + return list.hashCode(); + } + + @Override + public int compareTo(OpenWriteIds other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteIdHighWaterMark()).compareTo(other.isSetWriteIdHighWaterMark()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteIdHighWaterMark()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.writeIdHighWaterMark, other.writeIdHighWaterMark); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOpenWriteIds()).compareTo(other.isSetOpenWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOpenWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.openWriteIds, other.openWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMinWriteId()).compareTo(other.isSetMinWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMinWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.minWriteId, other.minWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAbortedBits()).compareTo(other.isSetAbortedBits()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAbortedBits()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.abortedBits, other.abortedBits); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("OpenWriteIds("); + boolean first = true; + + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeIdHighWaterMark:"); + sb.append(this.writeIdHighWaterMark); + 
first = false; + if (!first) sb.append(", "); + sb.append("openWriteIds:"); + if (this.openWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.openWriteIds); + } + first = false; + if (isSetMinWriteId()) { + if (!first) sb.append(", "); + sb.append("minWriteId:"); + sb.append(this.minWriteId); + first = false; + } + if (!first) sb.append(", "); + sb.append("abortedBits:"); + if (this.abortedBits == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.abortedBits, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString()); + } + + if (!isSetWriteIdHighWaterMark()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeIdHighWaterMark' is unset! Struct:" + toString()); + } + + if (!isSetOpenWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'openWriteIds' is unset! Struct:" + toString()); + } + + if (!isSetAbortedBits()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'abortedBits' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class OpenWriteIdsStandardSchemeFactory implements SchemeFactory { + public OpenWriteIdsStandardScheme getScheme() { + return new OpenWriteIdsStandardScheme(); + } + } + + private static class OpenWriteIdsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, OpenWriteIds struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WRITE_ID_HIGH_WATER_MARK + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeIdHighWaterMark = iprot.readI64(); + struct.setWriteIdHighWaterMarkIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // OPEN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); + struct.openWriteIds = new ArrayList(_list564.size); + long _elem565; + for (int _i566 = 0; _i566 < _list564.size; ++_i566) + { + _elem565 = iprot.readI64(); + struct.openWriteIds.add(_elem565); + } + iprot.readListEnd(); + } + struct.setOpenWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MIN_WRITE_ID + if (schemeField.type 
== org.apache.thrift.protocol.TType.I64) { + struct.minWriteId = iprot.readI64(); + struct.setMinWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // ABORTED_BITS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.abortedBits = iprot.readBinary(); + struct.setAbortedBitsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, OpenWriteIds struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_HIGH_WATER_MARK_FIELD_DESC); + oprot.writeI64(struct.writeIdHighWaterMark); + oprot.writeFieldEnd(); + if (struct.openWriteIds != null) { + oprot.writeFieldBegin(OPEN_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.openWriteIds.size())); + for (long _iter567 : struct.openWriteIds) + { + oprot.writeI64(_iter567); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.isSetMinWriteId()) { + oprot.writeFieldBegin(MIN_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.minWriteId); + oprot.writeFieldEnd(); + } + if (struct.abortedBits != null) { + oprot.writeFieldBegin(ABORTED_BITS_FIELD_DESC); + oprot.writeBinary(struct.abortedBits); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class OpenWriteIdsTupleSchemeFactory implements SchemeFactory { + public OpenWriteIdsTupleScheme getScheme() { + return new 
OpenWriteIdsTupleScheme(); + } + } + + private static class OpenWriteIdsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, OpenWriteIds struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.tableName); + oprot.writeI64(struct.writeIdHighWaterMark); + { + oprot.writeI32(struct.openWriteIds.size()); + for (long _iter568 : struct.openWriteIds) + { + oprot.writeI64(_iter568); + } + } + oprot.writeBinary(struct.abortedBits); + BitSet optionals = new BitSet(); + if (struct.isSetMinWriteId()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetMinWriteId()) { + oprot.writeI64(struct.minWriteId); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, OpenWriteIds struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + struct.writeIdHighWaterMark = iprot.readI64(); + struct.setWriteIdHighWaterMarkIsSet(true); + { + org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.openWriteIds = new ArrayList(_list569.size); + long _elem570; + for (int _i571 = 0; _i571 < _list569.size; ++_i571) + { + _elem570 = iprot.readI64(); + struct.openWriteIds.add(_elem570); + } + } + struct.setOpenWriteIdsIsSet(true); + struct.abortedBits = iprot.readBinary(); + struct.setAbortedBitsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.minWriteId = iprot.readI64(); + struct.setMinWriteIdIsSet(true); + } + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 4bdca8c..e4089c5 
100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list690.size); - long _elem691; - for (int _i692 = 0; _i692 < _list690.size; ++_i692) + org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list730.size); + long _elem731; + for (int _i732 = 0; _i732 < _list730.size; ++_i732) { - _elem691 = iprot.readI64(); - struct.fileIds.add(_elem691); + _elem731 = iprot.readI64(); + struct.fileIds.add(_elem731); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list693 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list693.size); - ByteBuffer _elem694; - for (int _i695 = 0; _i695 < _list693.size; ++_i695) + org.apache.thrift.protocol.TList _list733 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list733.size); + ByteBuffer _elem734; + for (int _i735 = 0; _i735 < _list733.size; ++_i735) { - _elem694 = iprot.readBinary(); - struct.metadata.add(_elem694); + _elem734 = iprot.readBinary(); + struct.metadata.add(_elem734); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter696 : 
struct.fileIds) + for (long _iter736 : struct.fileIds) { - oprot.writeI64(_iter696); + oprot.writeI64(_iter736); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter697 : struct.metadata) + for (ByteBuffer _iter737 : struct.metadata) { - oprot.writeBinary(_iter697); + oprot.writeBinary(_iter737); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter698 : struct.fileIds) + for (long _iter738 : struct.fileIds) { - oprot.writeI64(_iter698); + oprot.writeI64(_iter738); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter699 : struct.metadata) + for (ByteBuffer _iter739 : struct.metadata) { - oprot.writeBinary(_iter699); + oprot.writeBinary(_iter739); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list700 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list700.size); - long _elem701; - for (int _i702 = 0; _i702 < _list700.size; ++_i702) + org.apache.thrift.protocol.TList _list740 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list740.size); + long _elem741; + for (int _i742 = 0; _i742 < _list740.size; ++_i742) { - _elem701 = iprot.readI64(); - 
struct.fileIds.add(_elem701); + _elem741 = iprot.readI64(); + struct.fileIds.add(_elem741); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list703.size); - ByteBuffer _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list743.size); + ByteBuffer _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem704 = iprot.readBinary(); - struct.metadata.add(_elem704); + _elem744 = iprot.readBinary(); + struct.metadata.add(_elem744); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index 5687b19..fb7b94e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list598 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list598.size); - ShowCompactResponseElement _elem599; - for (int _i600 = 0; _i600 < _list598.size; ++_i600) + org.apache.thrift.protocol.TList _list638 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list638.size); + ShowCompactResponseElement _elem639; + for (int _i640 = 0; _i640 < _list638.size; ++_i640) { - _elem599 = new ShowCompactResponseElement(); 
- _elem599.read(iprot); - struct.compacts.add(_elem599); + _elem639 = new ShowCompactResponseElement(); + _elem639.read(iprot); + struct.compacts.add(_elem639); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter601 : struct.compacts) + for (ShowCompactResponseElement _iter641 : struct.compacts) { - _iter601.write(oprot); + _iter641.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter602 : struct.compacts) + for (ShowCompactResponseElement _iter642 : struct.compacts) { - _iter602.write(oprot); + _iter642.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list603.size); - ShowCompactResponseElement _elem604; - for (int _i605 = 0; _i605 < _list603.size; ++_i605) + org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list643.size); + ShowCompactResponseElement _elem644; + for (int _i645 = 0; _i645 < _list643.size; ++_i645) { - _elem604 = new ShowCompactResponseElement(); - _elem604.read(iprot); - struct.compacts.add(_elem604); + _elem644 = 
new ShowCompactResponseElement(); + _elem644.read(iprot); + struct.compacts.add(_elem644); } } struct.setCompactsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index f22deb2..02dd278 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); - struct.locks = new ArrayList(_list564.size); - ShowLocksResponseElement _elem565; - for (int _i566 = 0; _i566 < _list564.size; ++_i566) + org.apache.thrift.protocol.TList _list604 = iprot.readListBegin(); + struct.locks = new ArrayList(_list604.size); + ShowLocksResponseElement _elem605; + for (int _i606 = 0; _i606 < _list604.size; ++_i606) { - _elem565 = new ShowLocksResponseElement(); - _elem565.read(iprot); - struct.locks.add(_elem565); + _elem605 = new ShowLocksResponseElement(); + _elem605.read(iprot); + struct.locks.add(_elem605); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter567 : struct.locks) + for (ShowLocksResponseElement _iter607 : struct.locks) { - _iter567.write(oprot); + _iter607.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter568 : struct.locks) + for (ShowLocksResponseElement _iter608 : struct.locks) { - _iter568.write(oprot); + _iter608.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list569.size); - ShowLocksResponseElement _elem570; - for (int _i571 = 0; _i571 < _list569.size; ++_i571) + org.apache.thrift.protocol.TList _list609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list609.size); + ShowLocksResponseElement _elem610; + for (int _i611 = 0; _i611 < _list609.size; ++_i611) { - _elem570 = new ShowLocksResponseElement(); - _elem570.read(iprot); - struct.locks.add(_elem570); + _elem610 = new ShowLocksResponseElement(); + _elem610.read(iprot); + struct.locks.add(_elem610); } } struct.setLocksIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index d5e3527..b789f7e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -326,6 +326,10 @@ public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException; + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws NoSuchTxnException, MetaException, 
org.apache.thrift.TException; + + public AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException; + public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException; public LockResponse check_lock(CheckLockRequest rqst) throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, org.apache.thrift.TException; @@ -692,6 +696,10 @@ public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_open_write_ids(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void allocate_table_write_id(AllocateTableWriteIdRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void check_lock(CheckLockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -4966,6 +4974,67 @@ public void recv_commit_txn() throws NoSuchTxnException, TxnAbortedException, or return; } + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws NoSuchTxnException, MetaException, org.apache.thrift.TException + { + send_get_open_write_ids(rqst); + return recv_get_open_write_ids(); + } + + public void send_get_open_write_ids(GetOpenWriteIdsRequest rqst) throws org.apache.thrift.TException + { + get_open_write_ids_args args = new get_open_write_ids_args(); + args.setRqst(rqst); + sendBase("get_open_write_ids", args); + } + + public GetOpenWriteIdsResponse recv_get_open_write_ids() throws NoSuchTxnException, MetaException, org.apache.thrift.TException + { + 
get_open_write_ids_result result = new get_open_write_ids_result(); + receiveBase(result, "get_open_write_ids"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_open_write_ids failed: unknown result"); + } + + public AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException + { + send_allocate_table_write_id(rqst); + return recv_allocate_table_write_id(); + } + + public void send_allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws org.apache.thrift.TException + { + allocate_table_write_id_args args = new allocate_table_write_id_args(); + args.setRqst(rqst); + sendBase("allocate_table_write_id", args); + } + + public AllocateTableWriteIdResponse recv_allocate_table_write_id() throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException + { + allocate_table_write_id_result result = new allocate_table_write_id_result(); + receiveBase(result, "allocate_table_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "allocate_table_write_id failed: unknown result"); + } + public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException { send_lock(rqst); @@ -11106,6 +11175,70 @@ public void getResult() throws NoSuchTxnException, TxnAbortedException, org.apac } } + public void get_open_write_ids(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_open_write_ids_call method_call = new get_open_write_ids_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetOpenWriteIdsRequest rqst; + public get_open_write_ids_call(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_open_write_ids", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_open_write_ids_args args = new get_open_write_ids_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetOpenWriteIdsResponse getResult() throws NoSuchTxnException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_open_write_ids(); + } + } + + public void allocate_table_write_id(AllocateTableWriteIdRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + allocate_table_write_id_call method_call = new allocate_table_write_id_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private AllocateTableWriteIdRequest rqst; + public allocate_table_write_id_call(AllocateTableWriteIdRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("allocate_table_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + allocate_table_write_id_args args = new allocate_table_write_id_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public AllocateTableWriteIdResponse getResult() throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new 
Client(prot)).recv_allocate_table_write_id(); + } + } + public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); lock_call method_call = new lock_call(rqst, resultHandler, this, ___protocolFactory, ___transport); @@ -12500,6 +12633,8 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_open_write_ids() { + super("get_open_write_ids"); + } + + public get_open_write_ids_args getEmptyArgsInstance() { + return new get_open_write_ids_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_open_write_ids_result getResult(I iface, get_open_write_ids_args args) throws org.apache.thrift.TException { + get_open_write_ids_result result = new get_open_write_ids_result(); + try { + result.success = iface.get_open_write_ids(args.rqst); + } catch (NoSuchTxnException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id extends org.apache.thrift.ProcessFunction { + public allocate_table_write_id() { + super("allocate_table_write_id"); + } + + public allocate_table_write_id_args getEmptyArgsInstance() { + return new allocate_table_write_id_args(); + } + + protected boolean isOneway() { + return false; + } + + public allocate_table_write_id_result getResult(I iface, allocate_table_write_id_args args) throws org.apache.thrift.TException { + allocate_table_write_id_result result = new allocate_table_write_id_result(); + try { + result.success = iface.allocate_table_write_id(args.rqst); + } catch (NoSuchTxnException o1) { + result.o1 = o1; + } catch (TxnAbortedException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class lock extends org.apache.thrift.ProcessFunction { public lock() { super("lock"); @@ -17341,6 +17530,8 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public get_open_write_ids() { + super("get_open_write_ids"); + } + + public get_open_write_ids_args getEmptyArgsInstance() { + return new get_open_write_ids_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetOpenWriteIdsResponse o) { + get_open_write_ids_result result = new get_open_write_ids_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_open_write_ids_result result = new get_open_write_ids_result(); + if (e instanceof NoSuchTxnException) { + result.o1 = (NoSuchTxnException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I 
iface, get_open_write_ids_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_open_write_ids(args.rqst,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id extends org.apache.thrift.AsyncProcessFunction { + public allocate_table_write_id() { + super("allocate_table_write_id"); + } + + public allocate_table_write_id_args getEmptyArgsInstance() { + return new allocate_table_write_id_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(AllocateTableWriteIdResponse o) { + allocate_table_write_id_result result = new allocate_table_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + allocate_table_write_id_result result = new allocate_table_write_id_result(); + if (e instanceof NoSuchTxnException) { + result.o1 = (NoSuchTxnException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof TxnAbortedException) { + result.o2 = (TxnAbortedException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + 
fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, allocate_table_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.allocate_table_write_id(args.rqst,resultHandler); + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class lock extends org.apache.thrift.AsyncProcessFunction { public lock() { super("lock"); @@ -33865,13 +34185,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list818 = iprot.readListBegin(); - struct.success = new ArrayList(_list818.size); - String _elem819; - for (int _i820 = 0; _i820 < _list818.size; ++_i820) + org.apache.thrift.protocol.TList _list858 = iprot.readListBegin(); + struct.success = new ArrayList(_list858.size); + String _elem859; + for (int _i860 = 0; _i860 < _list858.size; ++_i860) { - _elem819 = iprot.readString(); - struct.success.add(_elem819); + _elem859 = iprot.readString(); + struct.success.add(_elem859); } iprot.readListEnd(); } @@ -33906,9 +34226,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter821 : struct.success) + for (String _iter861 : struct.success) { - oprot.writeString(_iter821); + oprot.writeString(_iter861); } oprot.writeListEnd(); } @@ -33947,9 +34267,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (String _iter822 : struct.success) + for (String _iter862 : struct.success) { - oprot.writeString(_iter822); + oprot.writeString(_iter862); } } } @@ -33964,13 +34284,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list823 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list823.size); - String _elem824; - for (int _i825 = 0; _i825 < _list823.size; ++_i825) + org.apache.thrift.protocol.TList _list863 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list863.size); + String _elem864; + for (int _i865 = 0; _i865 < _list863.size; ++_i865) { - _elem824 = iprot.readString(); - struct.success.add(_elem824); + _elem864 = iprot.readString(); + struct.success.add(_elem864); } } struct.setSuccessIsSet(true); @@ -34624,13 +34944,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list826 = iprot.readListBegin(); - struct.success = new ArrayList(_list826.size); - String _elem827; - for (int _i828 = 0; _i828 < _list826.size; ++_i828) + org.apache.thrift.protocol.TList _list866 = iprot.readListBegin(); + struct.success = new ArrayList(_list866.size); + String _elem867; + for (int _i868 = 0; _i868 < _list866.size; ++_i868) { - _elem827 = iprot.readString(); - struct.success.add(_elem827); + _elem867 = iprot.readString(); + struct.success.add(_elem867); } iprot.readListEnd(); } @@ -34665,9 +34985,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter829 : struct.success) + for (String _iter869 : struct.success) { - oprot.writeString(_iter829); + oprot.writeString(_iter869); } oprot.writeListEnd(); } @@ -34706,9 +35026,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter830 : struct.success) + for (String _iter870 : struct.success) { - oprot.writeString(_iter830); + oprot.writeString(_iter870); } } } @@ -34723,13 +35043,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list831 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list831.size); - String _elem832; - for (int _i833 = 0; _i833 < _list831.size; ++_i833) + org.apache.thrift.protocol.TList _list871 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list871.size); + String _elem872; + for (int _i873 = 0; _i873 < _list871.size; ++_i873) { - _elem832 = iprot.readString(); - struct.success.add(_elem832); + _elem872 = iprot.readString(); + struct.success.add(_elem872); } } struct.setSuccessIsSet(true); @@ -39336,16 +39656,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map834 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map834.size); - String _key835; - Type _val836; - for (int _i837 = 0; _i837 < _map834.size; ++_i837) + org.apache.thrift.protocol.TMap _map874 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map874.size); + String _key875; + Type 
_val876; + for (int _i877 = 0; _i877 < _map874.size; ++_i877) { - _key835 = iprot.readString(); - _val836 = new Type(); - _val836.read(iprot); - struct.success.put(_key835, _val836); + _key875 = iprot.readString(); + _val876 = new Type(); + _val876.read(iprot); + struct.success.put(_key875, _val876); } iprot.readMapEnd(); } @@ -39380,10 +39700,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter838 : struct.success.entrySet()) + for (Map.Entry _iter878 : struct.success.entrySet()) { - oprot.writeString(_iter838.getKey()); - _iter838.getValue().write(oprot); + oprot.writeString(_iter878.getKey()); + _iter878.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -39422,10 +39742,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter839 : struct.success.entrySet()) + for (Map.Entry _iter879 : struct.success.entrySet()) { - oprot.writeString(_iter839.getKey()); - _iter839.getValue().write(oprot); + oprot.writeString(_iter879.getKey()); + _iter879.getValue().write(oprot); } } } @@ -39440,16 +39760,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map840 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map840.size); - String _key841; - Type _val842; - for (int _i843 = 0; _i843 < _map840.size; ++_i843) + org.apache.thrift.protocol.TMap _map880 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map880.size); + String _key881; + Type _val882; + for (int _i883 = 0; _i883 < _map880.size; ++_i883) { - _key841 = iprot.readString(); - _val842 = new Type(); - _val842.read(iprot); - struct.success.put(_key841, _val842); + _key881 = iprot.readString(); + _val882 = new Type(); + _val882.read(iprot); + struct.success.put(_key881, _val882); } } struct.setSuccessIsSet(true); @@ -40484,14 +40804,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list844 = iprot.readListBegin(); - struct.success = new ArrayList(_list844.size); - FieldSchema _elem845; - for (int _i846 = 0; _i846 < _list844.size; ++_i846) + org.apache.thrift.protocol.TList _list884 = iprot.readListBegin(); + struct.success = new ArrayList(_list884.size); + FieldSchema _elem885; + for (int _i886 = 0; _i886 < _list884.size; ++_i886) { - _elem845 = new FieldSchema(); - _elem845.read(iprot); - struct.success.add(_elem845); + _elem885 = new FieldSchema(); + _elem885.read(iprot); + struct.success.add(_elem885); } iprot.readListEnd(); } @@ -40544,9 +40864,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter847 : struct.success) + for (FieldSchema _iter887 : struct.success) { - _iter847.write(oprot); + _iter887.write(oprot); } oprot.writeListEnd(); } @@ -40601,9 +40921,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter848 : struct.success) + for (FieldSchema _iter888 : struct.success) { - _iter848.write(oprot); + 
_iter888.write(oprot); } } } @@ -40624,14 +40944,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list849.size); - FieldSchema _elem850; - for (int _i851 = 0; _i851 < _list849.size; ++_i851) + org.apache.thrift.protocol.TList _list889 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list889.size); + FieldSchema _elem890; + for (int _i891 = 0; _i891 < _list889.size; ++_i891) { - _elem850 = new FieldSchema(); - _elem850.read(iprot); - struct.success.add(_elem850); + _elem890 = new FieldSchema(); + _elem890.read(iprot); + struct.success.add(_elem890); } } struct.setSuccessIsSet(true); @@ -41785,14 +42105,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list852 = iprot.readListBegin(); - struct.success = new ArrayList(_list852.size); - FieldSchema _elem853; - for (int _i854 = 0; _i854 < _list852.size; ++_i854) + org.apache.thrift.protocol.TList _list892 = iprot.readListBegin(); + struct.success = new ArrayList(_list892.size); + FieldSchema _elem893; + for (int _i894 = 0; _i894 < _list892.size; ++_i894) { - _elem853 = new FieldSchema(); - _elem853.read(iprot); - struct.success.add(_elem853); + _elem893 = new FieldSchema(); + _elem893.read(iprot); + struct.success.add(_elem893); } iprot.readListEnd(); } @@ -41845,9 +42165,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.success.size())); - for (FieldSchema _iter855 : struct.success) + for (FieldSchema _iter895 : struct.success) { - _iter855.write(oprot); + _iter895.write(oprot); } oprot.writeListEnd(); } @@ -41902,9 +42222,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter856 : struct.success) + for (FieldSchema _iter896 : struct.success) { - _iter856.write(oprot); + _iter896.write(oprot); } } } @@ -41925,14 +42245,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list857.size); - FieldSchema _elem858; - for (int _i859 = 0; _i859 < _list857.size; ++_i859) + org.apache.thrift.protocol.TList _list897 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list897.size); + FieldSchema _elem898; + for (int _i899 = 0; _i899 < _list897.size; ++_i899) { - _elem858 = new FieldSchema(); - _elem858.read(iprot); - struct.success.add(_elem858); + _elem898 = new FieldSchema(); + _elem898.read(iprot); + struct.success.add(_elem898); } } struct.setSuccessIsSet(true); @@ -42977,14 +43297,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list860 = iprot.readListBegin(); - struct.success = new ArrayList(_list860.size); - FieldSchema _elem861; - for (int _i862 = 0; _i862 < _list860.size; ++_i862) + org.apache.thrift.protocol.TList _list900 = iprot.readListBegin(); + struct.success = new ArrayList(_list900.size); + FieldSchema _elem901; + for (int _i902 = 0; _i902 
< _list900.size; ++_i902) { - _elem861 = new FieldSchema(); - _elem861.read(iprot); - struct.success.add(_elem861); + _elem901 = new FieldSchema(); + _elem901.read(iprot); + struct.success.add(_elem901); } iprot.readListEnd(); } @@ -43037,9 +43357,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter863 : struct.success) + for (FieldSchema _iter903 : struct.success) { - _iter863.write(oprot); + _iter903.write(oprot); } oprot.writeListEnd(); } @@ -43094,9 +43414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter864 : struct.success) + for (FieldSchema _iter904 : struct.success) { - _iter864.write(oprot); + _iter904.write(oprot); } } } @@ -43117,14 +43437,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list865.size); - FieldSchema _elem866; - for (int _i867 = 0; _i867 < _list865.size; ++_i867) + org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list905.size); + FieldSchema _elem906; + for (int _i907 = 0; _i907 < _list905.size; ++_i907) { - _elem866 = new FieldSchema(); - _elem866.read(iprot); - struct.success.add(_elem866); + _elem906 = new FieldSchema(); + _elem906.read(iprot); + struct.success.add(_elem906); } } struct.setSuccessIsSet(true); @@ -44278,14 +44598,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list868 = iprot.readListBegin(); - struct.success = new ArrayList(_list868.size); - FieldSchema _elem869; - for (int _i870 = 0; _i870 < _list868.size; ++_i870) + org.apache.thrift.protocol.TList _list908 = iprot.readListBegin(); + struct.success = new ArrayList(_list908.size); + FieldSchema _elem909; + for (int _i910 = 0; _i910 < _list908.size; ++_i910) { - _elem869 = new FieldSchema(); - _elem869.read(iprot); - struct.success.add(_elem869); + _elem909 = new FieldSchema(); + _elem909.read(iprot); + struct.success.add(_elem909); } iprot.readListEnd(); } @@ -44338,9 +44658,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter871 : struct.success) + for (FieldSchema _iter911 : struct.success) { - _iter871.write(oprot); + _iter911.write(oprot); } oprot.writeListEnd(); } @@ -44395,9 +44715,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter872 : struct.success) + for (FieldSchema _iter912 : struct.success) { - _iter872.write(oprot); + _iter912.write(oprot); } } } @@ -44418,14 +44738,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list873.size); - FieldSchema _elem874; - for (int _i875 = 0; _i875 < _list873.size; ++_i875) + org.apache.thrift.protocol.TList _list913 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list913.size); + FieldSchema _elem914; + for (int _i915 = 0; _i915 < _list913.size; ++_i915) { - _elem874 = new FieldSchema(); - _elem874.read(iprot); - struct.success.add(_elem874); + _elem914 = new FieldSchema(); + _elem914.read(iprot); + struct.success.add(_elem914); } } struct.setSuccessIsSet(true); @@ -47352,14 +47672,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list876 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list876.size); - SQLPrimaryKey _elem877; - for (int _i878 = 0; _i878 < _list876.size; ++_i878) + org.apache.thrift.protocol.TList _list916 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list916.size); + SQLPrimaryKey _elem917; + for (int _i918 = 0; _i918 < _list916.size; ++_i918) { - _elem877 = new SQLPrimaryKey(); - _elem877.read(iprot); - struct.primaryKeys.add(_elem877); + _elem917 = new SQLPrimaryKey(); + _elem917.read(iprot); + struct.primaryKeys.add(_elem917); } iprot.readListEnd(); } @@ -47371,14 +47691,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list879 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list879.size); - SQLForeignKey _elem880; - for (int _i881 = 0; _i881 < _list879.size; ++_i881) + org.apache.thrift.protocol.TList _list919 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list919.size); + SQLForeignKey _elem920; + for (int _i921 = 0; _i921 < _list919.size; ++_i921) { - _elem880 = new SQLForeignKey(); - _elem880.read(iprot); - struct.foreignKeys.add(_elem880); + _elem920 = new SQLForeignKey(); + 
_elem920.read(iprot); + struct.foreignKeys.add(_elem920); } iprot.readListEnd(); } @@ -47390,14 +47710,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list882 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list882.size); - SQLUniqueConstraint _elem883; - for (int _i884 = 0; _i884 < _list882.size; ++_i884) + org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list922.size); + SQLUniqueConstraint _elem923; + for (int _i924 = 0; _i924 < _list922.size; ++_i924) { - _elem883 = new SQLUniqueConstraint(); - _elem883.read(iprot); - struct.uniqueConstraints.add(_elem883); + _elem923 = new SQLUniqueConstraint(); + _elem923.read(iprot); + struct.uniqueConstraints.add(_elem923); } iprot.readListEnd(); } @@ -47409,14 +47729,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list885 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list885.size); - SQLNotNullConstraint _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list925 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list925.size); + SQLNotNullConstraint _elem926; + for (int _i927 = 0; _i927 < _list925.size; ++_i927) { - _elem886 = new SQLNotNullConstraint(); - _elem886.read(iprot); - struct.notNullConstraints.add(_elem886); + _elem926 = new SQLNotNullConstraint(); + _elem926.read(iprot); + struct.notNullConstraints.add(_elem926); } iprot.readListEnd(); } @@ -47447,9 +47767,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter888 : struct.primaryKeys) + for (SQLPrimaryKey _iter928 : struct.primaryKeys) { - _iter888.write(oprot); + _iter928.write(oprot); } oprot.writeListEnd(); } @@ -47459,9 +47779,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter889 : struct.foreignKeys) + for (SQLForeignKey _iter929 : struct.foreignKeys) { - _iter889.write(oprot); + _iter929.write(oprot); } oprot.writeListEnd(); } @@ -47471,9 +47791,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter890 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter930 : struct.uniqueConstraints) { - _iter890.write(oprot); + _iter930.write(oprot); } oprot.writeListEnd(); } @@ -47483,9 +47803,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter891 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter931 : struct.notNullConstraints) { - _iter891.write(oprot); + _iter931.write(oprot); } oprot.writeListEnd(); } @@ -47531,36 +47851,36 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter892 
: struct.primaryKeys) + for (SQLPrimaryKey _iter932 : struct.primaryKeys) { - _iter892.write(oprot); + _iter932.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter893 : struct.foreignKeys) + for (SQLForeignKey _iter933 : struct.foreignKeys) { - _iter893.write(oprot); + _iter933.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter894 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter934 : struct.uniqueConstraints) { - _iter894.write(oprot); + _iter934.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter895 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter935 : struct.notNullConstraints) { - _iter895.write(oprot); + _iter935.write(oprot); } } } @@ -47577,56 +47897,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list896 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list896.size); - SQLPrimaryKey _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list936 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list936.size); + SQLPrimaryKey _elem937; + for (int _i938 = 0; _i938 < _list936.size; ++_i938) { - _elem897 = new SQLPrimaryKey(); - _elem897.read(iprot); - struct.primaryKeys.add(_elem897); + _elem937 = new SQLPrimaryKey(); + _elem937.read(iprot); + struct.primaryKeys.add(_elem937); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list899 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.foreignKeys = new ArrayList(_list899.size); - SQLForeignKey _elem900; - for (int _i901 = 0; _i901 < _list899.size; ++_i901) + org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list939.size); + SQLForeignKey _elem940; + for (int _i941 = 0; _i941 < _list939.size; ++_i941) { - _elem900 = new SQLForeignKey(); - _elem900.read(iprot); - struct.foreignKeys.add(_elem900); + _elem940 = new SQLForeignKey(); + _elem940.read(iprot); + struct.foreignKeys.add(_elem940); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list902 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list902.size); - SQLUniqueConstraint _elem903; - for (int _i904 = 0; _i904 < _list902.size; ++_i904) + org.apache.thrift.protocol.TList _list942 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list942.size); + SQLUniqueConstraint _elem943; + for (int _i944 = 0; _i944 < _list942.size; ++_i944) { - _elem903 = new SQLUniqueConstraint(); - _elem903.read(iprot); - struct.uniqueConstraints.add(_elem903); + _elem943 = new SQLUniqueConstraint(); + _elem943.read(iprot); + struct.uniqueConstraints.add(_elem943); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list905.size); - SQLNotNullConstraint _elem906; - for (int _i907 = 0; _i907 < _list905.size; ++_i907) + org.apache.thrift.protocol.TList _list945 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints 
= new ArrayList(_list945.size); + SQLNotNullConstraint _elem946; + for (int _i947 = 0; _i947 < _list945.size; ++_i947) { - _elem906 = new SQLNotNullConstraint(); - _elem906.read(iprot); - struct.notNullConstraints.add(_elem906); + _elem946 = new SQLNotNullConstraint(); + _elem946.read(iprot); + struct.notNullConstraints.add(_elem946); } } struct.setNotNullConstraintsIsSet(true); @@ -55118,13 +55438,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list908 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list908.size); - String _elem909; - for (int _i910 = 0; _i910 < _list908.size; ++_i910) + org.apache.thrift.protocol.TList _list948 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list948.size); + String _elem949; + for (int _i950 = 0; _i950 < _list948.size; ++_i950) { - _elem909 = iprot.readString(); - struct.partNames.add(_elem909); + _elem949 = iprot.readString(); + struct.partNames.add(_elem949); } iprot.readListEnd(); } @@ -55160,9 +55480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter911 : struct.partNames) + for (String _iter951 : struct.partNames) { - oprot.writeString(_iter911); + oprot.writeString(_iter951); } oprot.writeListEnd(); } @@ -55205,9 +55525,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter912 : struct.partNames) + for (String _iter952 : struct.partNames) { - oprot.writeString(_iter912); + oprot.writeString(_iter952); } } } @@ -55227,13 +55547,13 @@ public void read(org.apache.thrift.protocol.TProtocol 
prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list913 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list913.size); - String _elem914; - for (int _i915 = 0; _i915 < _list913.size; ++_i915) + org.apache.thrift.protocol.TList _list953 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list953.size); + String _elem954; + for (int _i955 = 0; _i955 < _list953.size; ++_i955) { - _elem914 = iprot.readString(); - struct.partNames.add(_elem914); + _elem954 = iprot.readString(); + struct.partNames.add(_elem954); } } struct.setPartNamesIsSet(true); @@ -56458,13 +56778,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list916 = iprot.readListBegin(); - struct.success = new ArrayList(_list916.size); - String _elem917; - for (int _i918 = 0; _i918 < _list916.size; ++_i918) + org.apache.thrift.protocol.TList _list956 = iprot.readListBegin(); + struct.success = new ArrayList(_list956.size); + String _elem957; + for (int _i958 = 0; _i958 < _list956.size; ++_i958) { - _elem917 = iprot.readString(); - struct.success.add(_elem917); + _elem957 = iprot.readString(); + struct.success.add(_elem957); } iprot.readListEnd(); } @@ -56499,9 +56819,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter919 : struct.success) + for (String _iter959 : struct.success) { - oprot.writeString(_iter919); + oprot.writeString(_iter959); } oprot.writeListEnd(); } @@ -56540,9 +56860,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter920 : struct.success) + for (String _iter960 : struct.success) { - oprot.writeString(_iter920); + oprot.writeString(_iter960); } } } @@ -56557,13 +56877,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list921 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list921.size); - String _elem922; - for (int _i923 = 0; _i923 < _list921.size; ++_i923) + org.apache.thrift.protocol.TList _list961 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list961.size); + String _elem962; + for (int _i963 = 0; _i963 < _list961.size; ++_i963) { - _elem922 = iprot.readString(); - struct.success.add(_elem922); + _elem962 = iprot.readString(); + struct.success.add(_elem962); } } struct.setSuccessIsSet(true); @@ -57537,13 +57857,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list924 = iprot.readListBegin(); - struct.success = new ArrayList(_list924.size); - String _elem925; - for (int _i926 = 0; _i926 < _list924.size; ++_i926) + org.apache.thrift.protocol.TList _list964 = iprot.readListBegin(); + struct.success = new ArrayList(_list964.size); + String _elem965; + for (int _i966 = 0; _i966 < _list964.size; ++_i966) { - _elem925 = iprot.readString(); - struct.success.add(_elem925); + _elem965 = iprot.readString(); + struct.success.add(_elem965); } iprot.readListEnd(); } @@ -57578,9 +57898,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter927 : struct.success) + for (String _iter967 : struct.success) { - oprot.writeString(_iter927); + oprot.writeString(_iter967); } oprot.writeListEnd(); } @@ -57619,9 +57939,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter928 : struct.success) + for (String _iter968 : struct.success) { - oprot.writeString(_iter928); + oprot.writeString(_iter968); } } } @@ -57636,13 +57956,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list929 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list929.size); - String _elem930; - for (int _i931 = 0; _i931 < _list929.size; ++_i931) + org.apache.thrift.protocol.TList _list969 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list969.size); + String _elem970; + for (int _i971 = 0; _i971 < _list969.size; ++_i971) { - _elem930 = iprot.readString(); - struct.success.add(_elem930); + _elem970 = iprot.readString(); + struct.success.add(_elem970); } } struct.setSuccessIsSet(true); @@ -58408,13 +58728,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list932 = iprot.readListBegin(); - struct.success = new ArrayList(_list932.size); - String _elem933; - for (int _i934 = 0; _i934 < _list932.size; ++_i934) + org.apache.thrift.protocol.TList _list972 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list972.size); + String _elem973; + for (int _i974 = 0; _i974 < _list972.size; ++_i974) { - _elem933 = iprot.readString(); - struct.success.add(_elem933); + _elem973 = iprot.readString(); + struct.success.add(_elem973); } iprot.readListEnd(); } @@ -58449,9 +58769,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter935 : struct.success) + for (String _iter975 : struct.success) { - oprot.writeString(_iter935); + oprot.writeString(_iter975); } oprot.writeListEnd(); } @@ -58490,9 +58810,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter936 : struct.success) + for (String _iter976 : struct.success) { - oprot.writeString(_iter936); + oprot.writeString(_iter976); } } } @@ -58507,13 +58827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list937 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list937.size); - String _elem938; - for (int _i939 = 0; _i939 < _list937.size; ++_i939) + org.apache.thrift.protocol.TList _list977 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list977.size); + String _elem978; + for (int _i979 = 0; _i979 < _list977.size; ++_i979) { - _elem938 = iprot.readString(); - struct.success.add(_elem938); + _elem978 = iprot.readString(); + struct.success.add(_elem978); } } struct.setSuccessIsSet(true); @@ -59018,13 +59338,13 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list940 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list940.size); - String _elem941; - for (int _i942 = 0; _i942 < _list940.size; ++_i942) + org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list980.size); + String _elem981; + for (int _i982 = 0; _i982 < _list980.size; ++_i982) { - _elem941 = iprot.readString(); - struct.tbl_types.add(_elem941); + _elem981 = iprot.readString(); + struct.tbl_types.add(_elem981); } iprot.readListEnd(); } @@ -59060,9 +59380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter943 : struct.tbl_types) + for (String _iter983 : struct.tbl_types) { - oprot.writeString(_iter943); + oprot.writeString(_iter983); } oprot.writeListEnd(); } @@ -59105,9 +59425,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter944 : struct.tbl_types) + for (String _iter984 : struct.tbl_types) { - oprot.writeString(_iter944); + oprot.writeString(_iter984); } } } @@ -59127,13 +59447,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list945 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list945.size); - String _elem946; - for (int _i947 = 0; _i947 < _list945.size; ++_i947) + org.apache.thrift.protocol.TList _list985 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.tbl_types = new ArrayList(_list985.size); + String _elem986; + for (int _i987 = 0; _i987 < _list985.size; ++_i987) { - _elem946 = iprot.readString(); - struct.tbl_types.add(_elem946); + _elem986 = iprot.readString(); + struct.tbl_types.add(_elem986); } } struct.setTbl_typesIsSet(true); @@ -59539,14 +59859,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list948 = iprot.readListBegin(); - struct.success = new ArrayList(_list948.size); - TableMeta _elem949; - for (int _i950 = 0; _i950 < _list948.size; ++_i950) + org.apache.thrift.protocol.TList _list988 = iprot.readListBegin(); + struct.success = new ArrayList(_list988.size); + TableMeta _elem989; + for (int _i990 = 0; _i990 < _list988.size; ++_i990) { - _elem949 = new TableMeta(); - _elem949.read(iprot); - struct.success.add(_elem949); + _elem989 = new TableMeta(); + _elem989.read(iprot); + struct.success.add(_elem989); } iprot.readListEnd(); } @@ -59581,9 +59901,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter951 : struct.success) + for (TableMeta _iter991 : struct.success) { - _iter951.write(oprot); + _iter991.write(oprot); } oprot.writeListEnd(); } @@ -59622,9 +59942,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter952 : struct.success) + for (TableMeta _iter992 : struct.success) { - _iter952.write(oprot); + _iter992.write(oprot); } } } @@ -59639,14 +59959,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if 
(incoming.get(0)) { { - org.apache.thrift.protocol.TList _list953 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list953.size); - TableMeta _elem954; - for (int _i955 = 0; _i955 < _list953.size; ++_i955) + org.apache.thrift.protocol.TList _list993 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list993.size); + TableMeta _elem994; + for (int _i995 = 0; _i995 < _list993.size; ++_i995) { - _elem954 = new TableMeta(); - _elem954.read(iprot); - struct.success.add(_elem954); + _elem994 = new TableMeta(); + _elem994.read(iprot); + struct.success.add(_elem994); } } struct.setSuccessIsSet(true); @@ -60412,13 +60732,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list956 = iprot.readListBegin(); - struct.success = new ArrayList(_list956.size); - String _elem957; - for (int _i958 = 0; _i958 < _list956.size; ++_i958) + org.apache.thrift.protocol.TList _list996 = iprot.readListBegin(); + struct.success = new ArrayList(_list996.size); + String _elem997; + for (int _i998 = 0; _i998 < _list996.size; ++_i998) { - _elem957 = iprot.readString(); - struct.success.add(_elem957); + _elem997 = iprot.readString(); + struct.success.add(_elem997); } iprot.readListEnd(); } @@ -60453,9 +60773,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter959 : struct.success) + for (String _iter999 : struct.success) { - oprot.writeString(_iter959); + oprot.writeString(_iter999); } oprot.writeListEnd(); } @@ -60494,9 +60814,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter960 : struct.success) + for (String _iter1000 : struct.success) { - oprot.writeString(_iter960); + oprot.writeString(_iter1000); } } } @@ -60511,13 +60831,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list961 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list961.size); - String _elem962; - for (int _i963 = 0; _i963 < _list961.size; ++_i963) + org.apache.thrift.protocol.TList _list1001 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1001.size); + String _elem1002; + for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) { - _elem962 = iprot.readString(); - struct.success.add(_elem962); + _elem1002 = iprot.readString(); + struct.success.add(_elem1002); } } struct.setSuccessIsSet(true); @@ -61970,13 +62290,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list964 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list964.size); - String _elem965; - for (int _i966 = 0; _i966 < _list964.size; ++_i966) + org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1004.size); + String _elem1005; + for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) { - _elem965 = iprot.readString(); - struct.tbl_names.add(_elem965); + _elem1005 = iprot.readString(); + struct.tbl_names.add(_elem1005); } iprot.readListEnd(); } @@ -62007,9 +62327,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter967 : struct.tbl_names) + for (String _iter1007 : struct.tbl_names) { - oprot.writeString(_iter967); + oprot.writeString(_iter1007); } oprot.writeListEnd(); } @@ -62046,9 +62366,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter968 : struct.tbl_names) + for (String _iter1008 : struct.tbl_names) { - oprot.writeString(_iter968); + oprot.writeString(_iter1008); } } } @@ -62064,13 +62384,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list969 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list969.size); - String _elem970; - for (int _i971 = 0; _i971 < _list969.size; ++_i971) + org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1009.size); + String _elem1010; + for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) { - _elem970 = iprot.readString(); - struct.tbl_names.add(_elem970); + _elem1010 = iprot.readString(); + struct.tbl_names.add(_elem1010); } } struct.setTbl_namesIsSet(true); @@ -62395,14 +62715,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list972 = iprot.readListBegin(); - struct.success = new ArrayList
(_list972.size); - Table _elem973; - for (int _i974 = 0; _i974 < _list972.size; ++_i974) + org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1012.size); + Table _elem1013; + for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) { - _elem973 = new Table(); - _elem973.read(iprot); - struct.success.add(_elem973); + _elem1013 = new Table(); + _elem1013.read(iprot); + struct.success.add(_elem1013); } iprot.readListEnd(); } @@ -62428,9 +62748,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter975 : struct.success) + for (Table _iter1015 : struct.success) { - _iter975.write(oprot); + _iter1015.write(oprot); } oprot.writeListEnd(); } @@ -62461,9 +62781,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter976 : struct.success) + for (Table _iter1016 : struct.success) { - _iter976.write(oprot); + _iter1016.write(oprot); } } } @@ -62475,14 +62795,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list977 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list977.size); - Table _elem978; - for (int _i979 = 0; _i979 < _list977.size; ++_i979) + org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1017.size); + Table _elem1018; + for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) { - _elem978 = new Table(); - _elem978.read(iprot); - struct.success.add(_elem978); + _elem1018 = new Table(); + _elem1018.read(iprot); + struct.success.add(_elem1018); } } struct.setSuccessIsSet(true); @@ -64875,13 +65195,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list980.size); - String _elem981; - for (int _i982 = 0; _i982 < _list980.size; ++_i982) + org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1020.size); + String _elem1021; + for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) { - _elem981 = iprot.readString(); - struct.tbl_names.add(_elem981); + _elem1021 = iprot.readString(); + struct.tbl_names.add(_elem1021); } iprot.readListEnd(); } @@ -64912,9 +65232,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter983 : struct.tbl_names) + for (String _iter1023 : struct.tbl_names) { - oprot.writeString(_iter983); + oprot.writeString(_iter1023); } oprot.writeListEnd(); } @@ -64951,9 +65271,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter984 : struct.tbl_names) + for (String _iter1024 : struct.tbl_names) { - oprot.writeString(_iter984); + oprot.writeString(_iter1024); } } } @@ -64969,13 +65289,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - 
org.apache.thrift.protocol.TList _list985 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list985.size); - String _elem986; - for (int _i987 = 0; _i987 < _list985.size; ++_i987) + org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1025.size); + String _elem1026; + for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) { - _elem986 = iprot.readString(); - struct.tbl_names.add(_elem986); + _elem1026 = iprot.readString(); + struct.tbl_names.add(_elem1026); } } struct.setTbl_namesIsSet(true); @@ -65548,16 +65868,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map988 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map988.size); - String _key989; - Materialization _val990; - for (int _i991 = 0; _i991 < _map988.size; ++_i991) + org.apache.thrift.protocol.TMap _map1028 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1028.size); + String _key1029; + Materialization _val1030; + for (int _i1031 = 0; _i1031 < _map1028.size; ++_i1031) { - _key989 = iprot.readString(); - _val990 = new Materialization(); - _val990.read(iprot); - struct.success.put(_key989, _val990); + _key1029 = iprot.readString(); + _val1030 = new Materialization(); + _val1030.read(iprot); + struct.success.put(_key1029, _val1030); } iprot.readMapEnd(); } @@ -65610,10 +65930,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter992 : struct.success.entrySet()) + 
for (Map.Entry _iter1032 : struct.success.entrySet()) { - oprot.writeString(_iter992.getKey()); - _iter992.getValue().write(oprot); + oprot.writeString(_iter1032.getKey()); + _iter1032.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -65668,10 +65988,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter993 : struct.success.entrySet()) + for (Map.Entry _iter1033 : struct.success.entrySet()) { - oprot.writeString(_iter993.getKey()); - _iter993.getValue().write(oprot); + oprot.writeString(_iter1033.getKey()); + _iter1033.getValue().write(oprot); } } } @@ -65692,16 +66012,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map994 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map994.size); - String _key995; - Materialization _val996; - for (int _i997 = 0; _i997 < _map994.size; ++_i997) + org.apache.thrift.protocol.TMap _map1034 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1034.size); + String _key1035; + Materialization _val1036; + for (int _i1037 = 0; _i1037 < _map1034.size; ++_i1037) { - _key995 = iprot.readString(); - _val996 = new Materialization(); - _val996.read(iprot); - struct.success.put(_key995, _val996); + _key1035 = iprot.readString(); + _val1036 = new Materialization(); + _val1036.read(iprot); + struct.success.put(_key1035, _val1036); } } struct.setSuccessIsSet(true); @@ -66847,13 +67167,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list998 = iprot.readListBegin(); - struct.success = new ArrayList(_list998.size); - String _elem999; - for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000) + org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin(); + struct.success = new ArrayList(_list1038.size); + String _elem1039; + for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040) { - _elem999 = iprot.readString(); - struct.success.add(_elem999); + _elem1039 = iprot.readString(); + struct.success.add(_elem1039); } iprot.readListEnd(); } @@ -66906,9 +67226,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1001 : struct.success) + for (String _iter1041 : struct.success) { - oprot.writeString(_iter1001); + oprot.writeString(_iter1041); } oprot.writeListEnd(); } @@ -66963,9 +67283,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1002 : struct.success) + for (String _iter1042 : struct.success) { - oprot.writeString(_iter1002); + oprot.writeString(_iter1042); } } } @@ -66986,13 +67306,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1003 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1003.size); - String _elem1004; - for (int _i1005 = 0; _i1005 < _list1003.size; ++_i1005) + org.apache.thrift.protocol.TList _list1043 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1043.size); + String _elem1044; + for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045) { - _elem1004 = iprot.readString(); - struct.success.add(_elem1004); + _elem1044 = iprot.readString(); + struct.success.add(_elem1044); } } struct.setSuccessIsSet(true); @@ -72851,14 +73171,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1006 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1006.size); - Partition _elem1007; - for (int _i1008 = 0; _i1008 < _list1006.size; ++_i1008) + org.apache.thrift.protocol.TList _list1046 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1046.size); + Partition _elem1047; + for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048) { - _elem1007 = new Partition(); - _elem1007.read(iprot); - struct.new_parts.add(_elem1007); + _elem1047 = new Partition(); + _elem1047.read(iprot); + struct.new_parts.add(_elem1047); } iprot.readListEnd(); } @@ -72884,9 +73204,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1009 : struct.new_parts) + for (Partition _iter1049 : struct.new_parts) { - _iter1009.write(oprot); + _iter1049.write(oprot); } oprot.writeListEnd(); } @@ -72917,9 +73237,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1010 : struct.new_parts) + for (Partition _iter1050 : struct.new_parts) { - _iter1010.write(oprot); + _iter1050.write(oprot); } } } @@ -72931,14 +73251,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = 
iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1011 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1011.size); - Partition _elem1012; - for (int _i1013 = 0; _i1013 < _list1011.size; ++_i1013) + org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1051.size); + Partition _elem1052; + for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053) { - _elem1012 = new Partition(); - _elem1012.read(iprot); - struct.new_parts.add(_elem1012); + _elem1052 = new Partition(); + _elem1052.read(iprot); + struct.new_parts.add(_elem1052); } } struct.setNew_partsIsSet(true); @@ -73939,14 +74259,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1014 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1014.size); - PartitionSpec _elem1015; - for (int _i1016 = 0; _i1016 < _list1014.size; ++_i1016) + org.apache.thrift.protocol.TList _list1054 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1054.size); + PartitionSpec _elem1055; + for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056) { - _elem1015 = new PartitionSpec(); - _elem1015.read(iprot); - struct.new_parts.add(_elem1015); + _elem1055 = new PartitionSpec(); + _elem1055.read(iprot); + struct.new_parts.add(_elem1055); } iprot.readListEnd(); } @@ -73972,9 +74292,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1017 : struct.new_parts) + for (PartitionSpec 
_iter1057 : struct.new_parts) { - _iter1017.write(oprot); + _iter1057.write(oprot); } oprot.writeListEnd(); } @@ -74005,9 +74325,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1018 : struct.new_parts) + for (PartitionSpec _iter1058 : struct.new_parts) { - _iter1018.write(oprot); + _iter1058.write(oprot); } } } @@ -74019,14 +74339,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1019 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1019.size); - PartitionSpec _elem1020; - for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021) + org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1059.size); + PartitionSpec _elem1060; + for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) { - _elem1020 = new PartitionSpec(); - _elem1020.read(iprot); - struct.new_parts.add(_elem1020); + _elem1060 = new PartitionSpec(); + _elem1060.read(iprot); + struct.new_parts.add(_elem1060); } } struct.setNew_partsIsSet(true); @@ -75202,13 +75522,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1022 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1022.size); - String _elem1023; - for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024) + org.apache.thrift.protocol.TList _list1062 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1062.size); + String _elem1063; + for (int _i1064 = 0; _i1064 < _list1062.size; 
++_i1064) { - _elem1023 = iprot.readString(); - struct.part_vals.add(_elem1023); + _elem1063 = iprot.readString(); + struct.part_vals.add(_elem1063); } iprot.readListEnd(); } @@ -75244,9 +75564,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1025 : struct.part_vals) + for (String _iter1065 : struct.part_vals) { - oprot.writeString(_iter1025); + oprot.writeString(_iter1065); } oprot.writeListEnd(); } @@ -75289,9 +75609,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1026 : struct.part_vals) + for (String _iter1066 : struct.part_vals) { - oprot.writeString(_iter1026); + oprot.writeString(_iter1066); } } } @@ -75311,13 +75631,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1027 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1027.size); - String _elem1028; - for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029) + org.apache.thrift.protocol.TList _list1067 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1067.size); + String _elem1068; + for (int _i1069 = 0; _i1069 < _list1067.size; ++_i1069) { - _elem1028 = iprot.readString(); - struct.part_vals.add(_elem1028); + _elem1068 = iprot.readString(); + struct.part_vals.add(_elem1068); } } struct.setPart_valsIsSet(true); @@ -77626,13 +77946,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1030.size); - String _elem1031; - for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032) + org.apache.thrift.protocol.TList _list1070 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1070.size); + String _elem1071; + for (int _i1072 = 0; _i1072 < _list1070.size; ++_i1072) { - _elem1031 = iprot.readString(); - struct.part_vals.add(_elem1031); + _elem1071 = iprot.readString(); + struct.part_vals.add(_elem1071); } iprot.readListEnd(); } @@ -77677,9 +77997,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1033 : struct.part_vals) + for (String _iter1073 : struct.part_vals) { - oprot.writeString(_iter1033); + oprot.writeString(_iter1073); } oprot.writeListEnd(); } @@ -77730,9 +78050,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1034 : struct.part_vals) + for (String _iter1074 : struct.part_vals) { - oprot.writeString(_iter1034); + oprot.writeString(_iter1074); } } } @@ -77755,13 +78075,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1035 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1035.size); - String _elem1036; - for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037) + org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list1075.size); + String _elem1076; + for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077) { - _elem1036 = iprot.readString(); - struct.part_vals.add(_elem1036); + _elem1076 = iprot.readString(); + struct.part_vals.add(_elem1076); } } struct.setPart_valsIsSet(true); @@ -81631,13 +81951,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1038.size); - String _elem1039; - for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040) + org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1078.size); + String _elem1079; + for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) { - _elem1039 = iprot.readString(); - struct.part_vals.add(_elem1039); + _elem1079 = iprot.readString(); + struct.part_vals.add(_elem1079); } iprot.readListEnd(); } @@ -81681,9 +82001,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1041 : struct.part_vals) + for (String _iter1081 : struct.part_vals) { - oprot.writeString(_iter1041); + oprot.writeString(_iter1081); } oprot.writeListEnd(); } @@ -81732,9 +82052,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1042 : struct.part_vals) + for (String _iter1082 : struct.part_vals) { - oprot.writeString(_iter1042); + oprot.writeString(_iter1082); } } } @@ -81757,13 +82077,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - 
org.apache.thrift.protocol.TList _list1043 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1043.size); - String _elem1044; - for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045) + org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1083.size); + String _elem1084; + for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085) { - _elem1044 = iprot.readString(); - struct.part_vals.add(_elem1044); + _elem1084 = iprot.readString(); + struct.part_vals.add(_elem1084); } } struct.setPart_valsIsSet(true); @@ -83002,13 +83322,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1046 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1046.size); - String _elem1047; - for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048) + org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1086.size); + String _elem1087; + for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) { - _elem1047 = iprot.readString(); - struct.part_vals.add(_elem1047); + _elem1087 = iprot.readString(); + struct.part_vals.add(_elem1087); } iprot.readListEnd(); } @@ -83061,9 +83381,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1049 : struct.part_vals) + for (String _iter1089 : struct.part_vals) { - oprot.writeString(_iter1049); + oprot.writeString(_iter1089); } oprot.writeListEnd(); } @@ -83120,9 +83440,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1050 : struct.part_vals) + for (String _iter1090 : struct.part_vals) { - oprot.writeString(_iter1050); + oprot.writeString(_iter1090); } } } @@ -83148,13 +83468,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1051.size); - String _elem1052; - for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053) + org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1091.size); + String _elem1092; + for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) { - _elem1052 = iprot.readString(); - struct.part_vals.add(_elem1052); + _elem1092 = iprot.readString(); + struct.part_vals.add(_elem1092); } } struct.setPart_valsIsSet(true); @@ -87756,13 +88076,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1054 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1054.size); - String _elem1055; - for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056) + org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1094.size); + String _elem1095; + for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) { - _elem1055 = iprot.readString(); - struct.part_vals.add(_elem1055); + _elem1095 = iprot.readString(); + struct.part_vals.add(_elem1095); } iprot.readListEnd(); } @@ -87798,9 +88118,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1057 : struct.part_vals) + for (String _iter1097 : struct.part_vals) { - oprot.writeString(_iter1057); + oprot.writeString(_iter1097); } oprot.writeListEnd(); } @@ -87843,9 +88163,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1058 : struct.part_vals) + for (String _iter1098 : struct.part_vals) { - oprot.writeString(_iter1058); + oprot.writeString(_iter1098); } } } @@ -87865,13 +88185,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1059.size); - String _elem1060; - for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) + org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1099.size); + String _elem1100; + for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101) { - _elem1060 = iprot.readString(); - struct.part_vals.add(_elem1060); + _elem1100 = iprot.readString(); + struct.part_vals.add(_elem1100); } } struct.setPart_valsIsSet(true); @@ -89089,15 +89409,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1062 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1062.size); - String _key1063; - String _val1064; - for (int _i1065 = 0; _i1065 < _map1062.size; ++_i1065) + 
org.apache.thrift.protocol.TMap _map1102 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1102.size); + String _key1103; + String _val1104; + for (int _i1105 = 0; _i1105 < _map1102.size; ++_i1105) { - _key1063 = iprot.readString(); - _val1064 = iprot.readString(); - struct.partitionSpecs.put(_key1063, _val1064); + _key1103 = iprot.readString(); + _val1104 = iprot.readString(); + struct.partitionSpecs.put(_key1103, _val1104); } iprot.readMapEnd(); } @@ -89155,10 +89475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1066 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1106 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1066.getKey()); - oprot.writeString(_iter1066.getValue()); + oprot.writeString(_iter1106.getKey()); + oprot.writeString(_iter1106.getValue()); } oprot.writeMapEnd(); } @@ -89221,10 +89541,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1067 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1107 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1067.getKey()); - oprot.writeString(_iter1067.getValue()); + oprot.writeString(_iter1107.getKey()); + oprot.writeString(_iter1107.getValue()); } } } @@ -89248,15 +89568,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1068 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new 
HashMap(2*_map1068.size); - String _key1069; - String _val1070; - for (int _i1071 = 0; _i1071 < _map1068.size; ++_i1071) + org.apache.thrift.protocol.TMap _map1108 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1108.size); + String _key1109; + String _val1110; + for (int _i1111 = 0; _i1111 < _map1108.size; ++_i1111) { - _key1069 = iprot.readString(); - _val1070 = iprot.readString(); - struct.partitionSpecs.put(_key1069, _val1070); + _key1109 = iprot.readString(); + _val1110 = iprot.readString(); + struct.partitionSpecs.put(_key1109, _val1110); } } struct.setPartitionSpecsIsSet(true); @@ -90702,15 +91022,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1072 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1072.size); - String _key1073; - String _val1074; - for (int _i1075 = 0; _i1075 < _map1072.size; ++_i1075) + org.apache.thrift.protocol.TMap _map1112 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1112.size); + String _key1113; + String _val1114; + for (int _i1115 = 0; _i1115 < _map1112.size; ++_i1115) { - _key1073 = iprot.readString(); - _val1074 = iprot.readString(); - struct.partitionSpecs.put(_key1073, _val1074); + _key1113 = iprot.readString(); + _val1114 = iprot.readString(); + struct.partitionSpecs.put(_key1113, _val1114); } iprot.readMapEnd(); } @@ -90768,10 +91088,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1076 : 
struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1116 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1076.getKey()); - oprot.writeString(_iter1076.getValue()); + oprot.writeString(_iter1116.getKey()); + oprot.writeString(_iter1116.getValue()); } oprot.writeMapEnd(); } @@ -90834,10 +91154,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1077 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1117 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1077.getKey()); - oprot.writeString(_iter1077.getValue()); + oprot.writeString(_iter1117.getKey()); + oprot.writeString(_iter1117.getValue()); } } } @@ -90861,15 +91181,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1078 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1078.size); - String _key1079; - String _val1080; - for (int _i1081 = 0; _i1081 < _map1078.size; ++_i1081) + org.apache.thrift.protocol.TMap _map1118 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1118.size); + String _key1119; + String _val1120; + for (int _i1121 = 0; _i1121 < _map1118.size; ++_i1121) { - _key1079 = iprot.readString(); - _val1080 = iprot.readString(); - struct.partitionSpecs.put(_key1079, _val1080); + _key1119 = iprot.readString(); + _val1120 = iprot.readString(); + struct.partitionSpecs.put(_key1119, _val1120); } } struct.setPartitionSpecsIsSet(true); @@ -91534,14 +91854,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions 
case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); - struct.success = new ArrayList(_list1082.size); - Partition _elem1083; - for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) + org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); + struct.success = new ArrayList(_list1122.size); + Partition _elem1123; + for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) { - _elem1083 = new Partition(); - _elem1083.read(iprot); - struct.success.add(_elem1083); + _elem1123 = new Partition(); + _elem1123.read(iprot); + struct.success.add(_elem1123); } iprot.readListEnd(); } @@ -91603,9 +91923,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1085 : struct.success) + for (Partition _iter1125 : struct.success) { - _iter1085.write(oprot); + _iter1125.write(oprot); } oprot.writeListEnd(); } @@ -91668,9 +91988,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1086 : struct.success) + for (Partition _iter1126 : struct.success) { - _iter1086.write(oprot); + _iter1126.write(oprot); } } } @@ -91694,14 +92014,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1087.size); - Partition _elem1088; - for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) + org.apache.thrift.protocol.TList _list1127 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1127.size); + Partition _elem1128; + for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) { - _elem1088 = new Partition(); - _elem1088.read(iprot); - struct.success.add(_elem1088); + _elem1128 = new Partition(); + _elem1128.read(iprot); + struct.success.add(_elem1128); } } struct.setSuccessIsSet(true); @@ -92400,13 +92720,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1090.size); - String _elem1091; - for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) + org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1130.size); + String _elem1131; + for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) { - _elem1091 = iprot.readString(); - struct.part_vals.add(_elem1091); + _elem1131 = iprot.readString(); + struct.part_vals.add(_elem1131); } iprot.readListEnd(); } @@ -92426,13 +92746,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1093 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1093.size); - String _elem1094; - for (int _i1095 = 0; _i1095 < _list1093.size; ++_i1095) + org.apache.thrift.protocol.TList _list1133 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1133.size); + String _elem1134; + for (int _i1135 = 0; _i1135 < _list1133.size; ++_i1135) { - _elem1094 = iprot.readString(); - struct.group_names.add(_elem1094); + _elem1134 = iprot.readString(); + struct.group_names.add(_elem1134); } iprot.readListEnd(); } @@ -92468,9 +92788,9 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1096 : struct.part_vals) + for (String _iter1136 : struct.part_vals) { - oprot.writeString(_iter1096); + oprot.writeString(_iter1136); } oprot.writeListEnd(); } @@ -92485,9 +92805,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1097 : struct.group_names) + for (String _iter1137 : struct.group_names) { - oprot.writeString(_iter1097); + oprot.writeString(_iter1137); } oprot.writeListEnd(); } @@ -92536,9 +92856,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1098 : struct.part_vals) + for (String _iter1138 : struct.part_vals) { - oprot.writeString(_iter1098); + oprot.writeString(_iter1138); } } } @@ -92548,9 +92868,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1099 : struct.group_names) + for (String _iter1139 : struct.group_names) { - oprot.writeString(_iter1099); + oprot.writeString(_iter1139); } } } @@ -92570,13 +92890,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1100 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1100.size); - String _elem1101; - for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) + 
org.apache.thrift.protocol.TList _list1140 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1140.size); + String _elem1141; + for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) { - _elem1101 = iprot.readString(); - struct.part_vals.add(_elem1101); + _elem1141 = iprot.readString(); + struct.part_vals.add(_elem1141); } } struct.setPart_valsIsSet(true); @@ -92587,13 +92907,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1103.size); - String _elem1104; - for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) + org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1143.size); + String _elem1144; + for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) { - _elem1104 = iprot.readString(); - struct.group_names.add(_elem1104); + _elem1144 = iprot.readString(); + struct.group_names.add(_elem1144); } } struct.setGroup_namesIsSet(true); @@ -95362,14 +95682,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); - struct.success = new ArrayList(_list1106.size); - Partition _elem1107; - for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) + org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); + struct.success = new ArrayList(_list1146.size); + Partition _elem1147; + for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) { - _elem1107 = new Partition(); - _elem1107.read(iprot); - struct.success.add(_elem1107); 
+ _elem1147 = new Partition(); + _elem1147.read(iprot); + struct.success.add(_elem1147); } iprot.readListEnd(); } @@ -95413,9 +95733,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1109 : struct.success) + for (Partition _iter1149 : struct.success) { - _iter1109.write(oprot); + _iter1149.write(oprot); } oprot.writeListEnd(); } @@ -95462,9 +95782,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1110 : struct.success) + for (Partition _iter1150 : struct.success) { - _iter1110.write(oprot); + _iter1150.write(oprot); } } } @@ -95482,14 +95802,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1111.size); - Partition _elem1112; - for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) + org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1151.size); + Partition _elem1152; + for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) { - _elem1112 = new Partition(); - _elem1112.read(iprot); - struct.success.add(_elem1112); + _elem1152 = new Partition(); + _elem1152.read(iprot); + struct.success.add(_elem1152); } } struct.setSuccessIsSet(true); @@ -96179,13 +96499,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1114.size); - String _elem1115; - for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) + org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1154.size); + String _elem1155; + for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) { - _elem1115 = iprot.readString(); - struct.group_names.add(_elem1115); + _elem1155 = iprot.readString(); + struct.group_names.add(_elem1155); } iprot.readListEnd(); } @@ -96229,9 +96549,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1117 : struct.group_names) + for (String _iter1157 : struct.group_names) { - oprot.writeString(_iter1117); + oprot.writeString(_iter1157); } oprot.writeListEnd(); } @@ -96286,9 +96606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1118 : struct.group_names) + for (String _iter1158 : struct.group_names) { - oprot.writeString(_iter1118); + oprot.writeString(_iter1158); } } } @@ -96316,13 +96636,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1119.size); - String _elem1120; - for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) + org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + 
struct.group_names = new ArrayList(_list1159.size); + String _elem1160; + for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) { - _elem1120 = iprot.readString(); - struct.group_names.add(_elem1120); + _elem1160 = iprot.readString(); + struct.group_names.add(_elem1160); } } struct.setGroup_namesIsSet(true); @@ -96809,14 +97129,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); - struct.success = new ArrayList(_list1122.size); - Partition _elem1123; - for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) + org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); + struct.success = new ArrayList(_list1162.size); + Partition _elem1163; + for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) { - _elem1123 = new Partition(); - _elem1123.read(iprot); - struct.success.add(_elem1123); + _elem1163 = new Partition(); + _elem1163.read(iprot); + struct.success.add(_elem1163); } iprot.readListEnd(); } @@ -96860,9 +97180,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1125 : struct.success) + for (Partition _iter1165 : struct.success) { - _iter1125.write(oprot); + _iter1165.write(oprot); } oprot.writeListEnd(); } @@ -96909,9 +97229,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1126 : struct.success) + for (Partition _iter1166 : struct.success) { - _iter1126.write(oprot); + _iter1166.write(oprot); } } } @@ -96929,14 +97249,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet 
incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1127.size); - Partition _elem1128; - for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) + org.apache.thrift.protocol.TList _list1167 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1167.size); + Partition _elem1168; + for (int _i1169 = 0; _i1169 < _list1167.size; ++_i1169) { - _elem1128 = new Partition(); - _elem1128.read(iprot); - struct.success.add(_elem1128); + _elem1168 = new Partition(); + _elem1168.read(iprot); + struct.success.add(_elem1168); } } struct.setSuccessIsSet(true); @@ -97999,14 +98319,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); - struct.success = new ArrayList(_list1130.size); - PartitionSpec _elem1131; - for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) + org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); + struct.success = new ArrayList(_list1170.size); + PartitionSpec _elem1171; + for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) { - _elem1131 = new PartitionSpec(); - _elem1131.read(iprot); - struct.success.add(_elem1131); + _elem1171 = new PartitionSpec(); + _elem1171.read(iprot); + struct.success.add(_elem1171); } iprot.readListEnd(); } @@ -98050,9 +98370,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1133 : struct.success) + for (PartitionSpec _iter1173 : 
struct.success) { - _iter1133.write(oprot); + _iter1173.write(oprot); } oprot.writeListEnd(); } @@ -98099,9 +98419,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1134 : struct.success) + for (PartitionSpec _iter1174 : struct.success) { - _iter1134.write(oprot); + _iter1174.write(oprot); } } } @@ -98119,14 +98439,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1135.size); - PartitionSpec _elem1136; - for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) + org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1175.size); + PartitionSpec _elem1176; + for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) { - _elem1136 = new PartitionSpec(); - _elem1136.read(iprot); - struct.success.add(_elem1136); + _elem1176 = new PartitionSpec(); + _elem1176.read(iprot); + struct.success.add(_elem1176); } } struct.setSuccessIsSet(true); @@ -99186,13 +99506,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); - struct.success = new ArrayList(_list1138.size); - String _elem1139; - for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) + org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); + struct.success = new ArrayList(_list1178.size); + String _elem1179; + for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) { - _elem1139 = 
iprot.readString(); - struct.success.add(_elem1139); + _elem1179 = iprot.readString(); + struct.success.add(_elem1179); } iprot.readListEnd(); } @@ -99236,9 +99556,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1141 : struct.success) + for (String _iter1181 : struct.success) { - oprot.writeString(_iter1141); + oprot.writeString(_iter1181); } oprot.writeListEnd(); } @@ -99285,9 +99605,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1142 : struct.success) + for (String _iter1182 : struct.success) { - oprot.writeString(_iter1142); + oprot.writeString(_iter1182); } } } @@ -99305,13 +99625,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1143.size); - String _elem1144; - for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) + org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1183.size); + String _elem1184; + for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) { - _elem1144 = iprot.readString(); - struct.success.add(_elem1144); + _elem1184 = iprot.readString(); + struct.success.add(_elem1184); } } struct.setSuccessIsSet(true); @@ -100842,13 +101162,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1146.size); - String _elem1147; - for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) + org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1186.size); + String _elem1187; + for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) { - _elem1147 = iprot.readString(); - struct.part_vals.add(_elem1147); + _elem1187 = iprot.readString(); + struct.part_vals.add(_elem1187); } iprot.readListEnd(); } @@ -100892,9 +101212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1149 : struct.part_vals) + for (String _iter1189 : struct.part_vals) { - oprot.writeString(_iter1149); + oprot.writeString(_iter1189); } oprot.writeListEnd(); } @@ -100943,9 +101263,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1150 : struct.part_vals) + for (String _iter1190 : struct.part_vals) { - oprot.writeString(_iter1150); + oprot.writeString(_iter1190); } } } @@ -100968,13 +101288,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1151.size); - String _elem1152; - for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) + org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list1191.size); + String _elem1192; + for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) { - _elem1152 = iprot.readString(); - struct.part_vals.add(_elem1152); + _elem1192 = iprot.readString(); + struct.part_vals.add(_elem1192); } } struct.setPart_valsIsSet(true); @@ -101465,14 +101785,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); - struct.success = new ArrayList(_list1154.size); - Partition _elem1155; - for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) + org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); + struct.success = new ArrayList(_list1194.size); + Partition _elem1195; + for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) { - _elem1155 = new Partition(); - _elem1155.read(iprot); - struct.success.add(_elem1155); + _elem1195 = new Partition(); + _elem1195.read(iprot); + struct.success.add(_elem1195); } iprot.readListEnd(); } @@ -101516,9 +101836,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1157 : struct.success) + for (Partition _iter1197 : struct.success) { - _iter1157.write(oprot); + _iter1197.write(oprot); } oprot.writeListEnd(); } @@ -101565,9 +101885,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1158 : struct.success) + for (Partition _iter1198 : struct.success) { - _iter1158.write(oprot); + _iter1198.write(oprot); } } } @@ -101585,14 +101905,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = 
iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1159.size); - Partition _elem1160; - for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) + org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1199.size); + Partition _elem1200; + for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) { - _elem1160 = new Partition(); - _elem1160.read(iprot); - struct.success.add(_elem1160); + _elem1200 = new Partition(); + _elem1200.read(iprot); + struct.success.add(_elem1200); } } struct.setSuccessIsSet(true); @@ -102364,13 +102684,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1162.size); - String _elem1163; - for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) + org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1202.size); + String _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1163 = iprot.readString(); - struct.part_vals.add(_elem1163); + _elem1203 = iprot.readString(); + struct.part_vals.add(_elem1203); } iprot.readListEnd(); } @@ -102398,13 +102718,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1165 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1165.size); - String _elem1166; - for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) + 
org.apache.thrift.protocol.TList _list1205 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1205.size); + String _elem1206; + for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) { - _elem1166 = iprot.readString(); - struct.group_names.add(_elem1166); + _elem1206 = iprot.readString(); + struct.group_names.add(_elem1206); } iprot.readListEnd(); } @@ -102440,9 +102760,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1168 : struct.part_vals) + for (String _iter1208 : struct.part_vals) { - oprot.writeString(_iter1168); + oprot.writeString(_iter1208); } oprot.writeListEnd(); } @@ -102460,9 +102780,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1169 : struct.group_names) + for (String _iter1209 : struct.group_names) { - oprot.writeString(_iter1169); + oprot.writeString(_iter1209); } oprot.writeListEnd(); } @@ -102514,9 +102834,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1170 : struct.part_vals) + for (String _iter1210 : struct.part_vals) { - oprot.writeString(_iter1170); + oprot.writeString(_iter1210); } } } @@ -102529,9 +102849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1171 : struct.group_names) + for (String _iter1211 : struct.group_names) { - oprot.writeString(_iter1171); + oprot.writeString(_iter1211); } } } @@ 
-102551,13 +102871,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1172 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1172.size); - String _elem1173; - for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) + org.apache.thrift.protocol.TList _list1212 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1212.size); + String _elem1213; + for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) { - _elem1173 = iprot.readString(); - struct.part_vals.add(_elem1173); + _elem1213 = iprot.readString(); + struct.part_vals.add(_elem1213); } } struct.setPart_valsIsSet(true); @@ -102572,13 +102892,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1175.size); - String _elem1176; - for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) + org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1215.size); + String _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1176 = iprot.readString(); - struct.group_names.add(_elem1176); + _elem1216 = iprot.readString(); + struct.group_names.add(_elem1216); } } struct.setGroup_namesIsSet(true); @@ -103065,14 +103385,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); - 
struct.success = new ArrayList(_list1178.size); - Partition _elem1179; - for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) + org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); + struct.success = new ArrayList(_list1218.size); + Partition _elem1219; + for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) { - _elem1179 = new Partition(); - _elem1179.read(iprot); - struct.success.add(_elem1179); + _elem1219 = new Partition(); + _elem1219.read(iprot); + struct.success.add(_elem1219); } iprot.readListEnd(); } @@ -103116,9 +103436,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1181 : struct.success) + for (Partition _iter1221 : struct.success) { - _iter1181.write(oprot); + _iter1221.write(oprot); } oprot.writeListEnd(); } @@ -103165,9 +103485,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1182 : struct.success) + for (Partition _iter1222 : struct.success) { - _iter1182.write(oprot); + _iter1222.write(oprot); } } } @@ -103185,14 +103505,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1183.size); - Partition _elem1184; - for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) + org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1223.size); + Partition _elem1224; + for (int _i1225 = 0; 
_i1225 < _list1223.size; ++_i1225) { - _elem1184 = new Partition(); - _elem1184.read(iprot); - struct.success.add(_elem1184); + _elem1224 = new Partition(); + _elem1224.read(iprot); + struct.success.add(_elem1224); } } struct.setSuccessIsSet(true); @@ -103785,13 +104105,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1186.size); - String _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1226.size); + String _elem1227; + for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) { - _elem1187 = iprot.readString(); - struct.part_vals.add(_elem1187); + _elem1227 = iprot.readString(); + struct.part_vals.add(_elem1227); } iprot.readListEnd(); } @@ -103835,9 +104155,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1189 : struct.part_vals) + for (String _iter1229 : struct.part_vals) { - oprot.writeString(_iter1189); + oprot.writeString(_iter1229); } oprot.writeListEnd(); } @@ -103886,9 +104206,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1190 : struct.part_vals) + for (String _iter1230 : struct.part_vals) { - oprot.writeString(_iter1190); + oprot.writeString(_iter1230); } } } @@ -103911,13 +104231,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1191 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1191.size); - String _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1231.size); + String _elem1232; + for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) { - _elem1192 = iprot.readString(); - struct.part_vals.add(_elem1192); + _elem1232 = iprot.readString(); + struct.part_vals.add(_elem1232); } } struct.setPart_valsIsSet(true); @@ -104405,13 +104725,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.success = new ArrayList(_list1194.size); - String _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); + struct.success = new ArrayList(_list1234.size); + String _elem1235; + for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) { - _elem1195 = iprot.readString(); - struct.success.add(_elem1195); + _elem1235 = iprot.readString(); + struct.success.add(_elem1235); } iprot.readListEnd(); } @@ -104455,9 +104775,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1197 : struct.success) + for (String _iter1237 : struct.success) { - oprot.writeString(_iter1197); + oprot.writeString(_iter1237); } oprot.writeListEnd(); } @@ -104504,9 +104824,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if 
(struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1198 : struct.success) + for (String _iter1238 : struct.success) { - oprot.writeString(_iter1198); + oprot.writeString(_iter1238); } } } @@ -104524,13 +104844,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1199.size); - String _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1239.size); + String _elem1240; + for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) { - _elem1200 = iprot.readString(); - struct.success.add(_elem1200); + _elem1240 = iprot.readString(); + struct.success.add(_elem1240); } } struct.setSuccessIsSet(true); @@ -105697,14 +106017,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); - struct.success = new ArrayList(_list1202.size); - Partition _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); + struct.success = new ArrayList(_list1242.size); + Partition _elem1243; + for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) { - _elem1203 = new Partition(); - _elem1203.read(iprot); - struct.success.add(_elem1203); + _elem1243 = new Partition(); + _elem1243.read(iprot); + struct.success.add(_elem1243); } iprot.readListEnd(); } @@ -105748,9 +106068,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1205 : struct.success) + for (Partition _iter1245 : struct.success) { - _iter1205.write(oprot); + _iter1245.write(oprot); } oprot.writeListEnd(); } @@ -105797,9 +106117,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1206 : struct.success) + for (Partition _iter1246 : struct.success) { - _iter1206.write(oprot); + _iter1246.write(oprot); } } } @@ -105817,14 +106137,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1207.size); - Partition _elem1208; - for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) + org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1247.size); + Partition _elem1248; + for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) { - _elem1208 = new Partition(); - _elem1208.read(iprot); - struct.success.add(_elem1208); + _elem1248 = new Partition(); + _elem1248.read(iprot); + struct.success.add(_elem1248); } } struct.setSuccessIsSet(true); @@ -106991,14 +107311,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); - struct.success = new ArrayList(_list1210.size); - PartitionSpec _elem1211; - for (int _i1212 = 0; _i1212 < 
_list1210.size; ++_i1212) + org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); + struct.success = new ArrayList(_list1250.size); + PartitionSpec _elem1251; + for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) { - _elem1211 = new PartitionSpec(); - _elem1211.read(iprot); - struct.success.add(_elem1211); + _elem1251 = new PartitionSpec(); + _elem1251.read(iprot); + struct.success.add(_elem1251); } iprot.readListEnd(); } @@ -107042,9 +107362,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1213 : struct.success) + for (PartitionSpec _iter1253 : struct.success) { - _iter1213.write(oprot); + _iter1253.write(oprot); } oprot.writeListEnd(); } @@ -107091,9 +107411,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1214 : struct.success) + for (PartitionSpec _iter1254 : struct.success) { - _iter1214.write(oprot); + _iter1254.write(oprot); } } } @@ -107111,14 +107431,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1215.size); - PartitionSpec _elem1216; - for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) + org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1255.size); + PartitionSpec _elem1256; + for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) { - _elem1216 = new PartitionSpec(); 
- _elem1216.read(iprot); - struct.success.add(_elem1216); + _elem1256 = new PartitionSpec(); + _elem1256.read(iprot); + struct.success.add(_elem1256); } } struct.setSuccessIsSet(true); @@ -109702,13 +110022,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); - struct.names = new ArrayList(_list1218.size); - String _elem1219; - for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) + org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); + struct.names = new ArrayList(_list1258.size); + String _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1219 = iprot.readString(); - struct.names.add(_elem1219); + _elem1259 = iprot.readString(); + struct.names.add(_elem1259); } iprot.readListEnd(); } @@ -109744,9 +110064,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1221 : struct.names) + for (String _iter1261 : struct.names) { - oprot.writeString(_iter1221); + oprot.writeString(_iter1261); } oprot.writeListEnd(); } @@ -109789,9 +110109,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1222 : struct.names) + for (String _iter1262 : struct.names) { - oprot.writeString(_iter1222); + oprot.writeString(_iter1262); } } } @@ -109811,13 +110131,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new 
ArrayList(_list1223.size); - String _elem1224; - for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) + org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1263.size); + String _elem1264; + for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) { - _elem1224 = iprot.readString(); - struct.names.add(_elem1224); + _elem1264 = iprot.readString(); + struct.names.add(_elem1264); } } struct.setNamesIsSet(true); @@ -110304,14 +110624,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); - struct.success = new ArrayList(_list1226.size); - Partition _elem1227; - for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) + org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); + struct.success = new ArrayList(_list1266.size); + Partition _elem1267; + for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) { - _elem1227 = new Partition(); - _elem1227.read(iprot); - struct.success.add(_elem1227); + _elem1267 = new Partition(); + _elem1267.read(iprot); + struct.success.add(_elem1267); } iprot.readListEnd(); } @@ -110355,9 +110675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1229 : struct.success) + for (Partition _iter1269 : struct.success) { - _iter1229.write(oprot); + _iter1269.write(oprot); } oprot.writeListEnd(); } @@ -110404,9 +110724,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1230 : 
struct.success) + for (Partition _iter1270 : struct.success) { - _iter1230.write(oprot); + _iter1270.write(oprot); } } } @@ -110424,14 +110744,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1231.size); - Partition _elem1232; - for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) + org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1271.size); + Partition _elem1272; + for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) { - _elem1232 = new Partition(); - _elem1232.read(iprot); - struct.success.add(_elem1232); + _elem1272 = new Partition(); + _elem1272.read(iprot); + struct.success.add(_elem1272); } } struct.setSuccessIsSet(true); @@ -111981,14 +112301,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1234.size); - Partition _elem1235; - for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1274.size); + Partition _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1235 = new Partition(); - _elem1235.read(iprot); - struct.new_parts.add(_elem1235); + _elem1275 = new Partition(); + _elem1275.read(iprot); + struct.new_parts.add(_elem1275); } iprot.readListEnd(); } @@ -112024,9 +112344,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a 
oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1237 : struct.new_parts) + for (Partition _iter1277 : struct.new_parts) { - _iter1237.write(oprot); + _iter1277.write(oprot); } oprot.writeListEnd(); } @@ -112069,9 +112389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1238 : struct.new_parts) + for (Partition _iter1278 : struct.new_parts) { - _iter1238.write(oprot); + _iter1278.write(oprot); } } } @@ -112091,14 +112411,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1239.size); - Partition _elem1240; - for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) + org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1279.size); + Partition _elem1280; + for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) { - _elem1240 = new Partition(); - _elem1240.read(iprot); - struct.new_parts.add(_elem1240); + _elem1280 = new Partition(); + _elem1280.read(iprot); + struct.new_parts.add(_elem1280); } } struct.setNew_partsIsSet(true); @@ -113151,14 +113471,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1242.size); - Partition _elem1243; - for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) + 
org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1282.size); + Partition _elem1283; + for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) { - _elem1243 = new Partition(); - _elem1243.read(iprot); - struct.new_parts.add(_elem1243); + _elem1283 = new Partition(); + _elem1283.read(iprot); + struct.new_parts.add(_elem1283); } iprot.readListEnd(); } @@ -113203,9 +113523,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1245 : struct.new_parts) + for (Partition _iter1285 : struct.new_parts) { - _iter1245.write(oprot); + _iter1285.write(oprot); } oprot.writeListEnd(); } @@ -113256,9 +113576,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1246 : struct.new_parts) + for (Partition _iter1286 : struct.new_parts) { - _iter1246.write(oprot); + _iter1286.write(oprot); } } } @@ -113281,14 +113601,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1247.size); - Partition _elem1248; - for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) + org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1287.size); + Partition _elem1288; + for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) { - _elem1248 = new Partition(); - _elem1248.read(iprot); - struct.new_parts.add(_elem1248); + _elem1288 = new 
Partition(); + _elem1288.read(iprot); + struct.new_parts.add(_elem1288); } } struct.setNew_partsIsSet(true); @@ -115489,13 +115809,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1250.size); - String _elem1251; - for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) + org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1290.size); + String _elem1291; + for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) { - _elem1251 = iprot.readString(); - struct.part_vals.add(_elem1251); + _elem1291 = iprot.readString(); + struct.part_vals.add(_elem1291); } iprot.readListEnd(); } @@ -115540,9 +115860,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1253 : struct.part_vals) + for (String _iter1293 : struct.part_vals) { - oprot.writeString(_iter1253); + oprot.writeString(_iter1293); } oprot.writeListEnd(); } @@ -115593,9 +115913,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1254 : struct.part_vals) + for (String _iter1294 : struct.part_vals) { - oprot.writeString(_iter1254); + oprot.writeString(_iter1294); } } } @@ -115618,13 +115938,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new 
ArrayList(_list1255.size); - String _elem1256; - for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) + org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1295.size); + String _elem1296; + for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) { - _elem1256 = iprot.readString(); - struct.part_vals.add(_elem1256); + _elem1296 = iprot.readString(); + struct.part_vals.add(_elem1296); } } struct.setPart_valsIsSet(true); @@ -116498,13 +116818,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1258.size); - String _elem1259; - for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) + org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1298.size); + String _elem1299; + for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) { - _elem1259 = iprot.readString(); - struct.part_vals.add(_elem1259); + _elem1299 = iprot.readString(); + struct.part_vals.add(_elem1299); } iprot.readListEnd(); } @@ -116538,9 +116858,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1261 : struct.part_vals) + for (String _iter1301 : struct.part_vals) { - oprot.writeString(_iter1261); + oprot.writeString(_iter1301); } oprot.writeListEnd(); } @@ -116577,9 +116897,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1262 : 
struct.part_vals) + for (String _iter1302 : struct.part_vals) { - oprot.writeString(_iter1262); + oprot.writeString(_iter1302); } } } @@ -116594,13 +116914,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1263.size); - String _elem1264; - for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) + org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1303.size); + String _elem1304; + for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) { - _elem1264 = iprot.readString(); - struct.part_vals.add(_elem1264); + _elem1304 = iprot.readString(); + struct.part_vals.add(_elem1304); } } struct.setPart_valsIsSet(true); @@ -118755,13 +119075,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); - struct.success = new ArrayList(_list1266.size); - String _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); + struct.success = new ArrayList(_list1306.size); + String _elem1307; + for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) { - _elem1267 = iprot.readString(); - struct.success.add(_elem1267); + _elem1307 = iprot.readString(); + struct.success.add(_elem1307); } iprot.readListEnd(); } @@ -118796,9 +119116,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1269 : struct.success) + for (String _iter1309 : struct.success) { - oprot.writeString(_iter1269); + oprot.writeString(_iter1309); } oprot.writeListEnd(); } @@ -118837,9 +119157,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1270 : struct.success) + for (String _iter1310 : struct.success) { - oprot.writeString(_iter1270); + oprot.writeString(_iter1310); } } } @@ -118854,13 +119174,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1271.size); - String _elem1272; - for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) + org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1311.size); + String _elem1312; + for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) { - _elem1272 = iprot.readString(); - struct.success.add(_elem1272); + _elem1312 = iprot.readString(); + struct.success.add(_elem1312); } } struct.setSuccessIsSet(true); @@ -119623,15 +119943,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1274 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1274.size); - String _key1275; - String _val1276; - for (int _i1277 = 0; _i1277 < _map1274.size; ++_i1277) + org.apache.thrift.protocol.TMap _map1314 = iprot.readMapBegin(); + struct.success = new 
HashMap(2*_map1314.size); + String _key1315; + String _val1316; + for (int _i1317 = 0; _i1317 < _map1314.size; ++_i1317) { - _key1275 = iprot.readString(); - _val1276 = iprot.readString(); - struct.success.put(_key1275, _val1276); + _key1315 = iprot.readString(); + _val1316 = iprot.readString(); + struct.success.put(_key1315, _val1316); } iprot.readMapEnd(); } @@ -119666,10 +119986,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1278 : struct.success.entrySet()) + for (Map.Entry _iter1318 : struct.success.entrySet()) { - oprot.writeString(_iter1278.getKey()); - oprot.writeString(_iter1278.getValue()); + oprot.writeString(_iter1318.getKey()); + oprot.writeString(_iter1318.getValue()); } oprot.writeMapEnd(); } @@ -119708,10 +120028,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1279 : struct.success.entrySet()) + for (Map.Entry _iter1319 : struct.success.entrySet()) { - oprot.writeString(_iter1279.getKey()); - oprot.writeString(_iter1279.getValue()); + oprot.writeString(_iter1319.getKey()); + oprot.writeString(_iter1319.getValue()); } } } @@ -119726,15 +120046,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1280 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1280.size); - String _key1281; - String _val1282; - for (int _i1283 = 0; _i1283 < _map1280.size; ++_i1283) + org.apache.thrift.protocol.TMap _map1320 = 
new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1320.size); + String _key1321; + String _val1322; + for (int _i1323 = 0; _i1323 < _map1320.size; ++_i1323) { - _key1281 = iprot.readString(); - _val1282 = iprot.readString(); - struct.success.put(_key1281, _val1282); + _key1321 = iprot.readString(); + _val1322 = iprot.readString(); + struct.success.put(_key1321, _val1322); } } struct.setSuccessIsSet(true); @@ -120329,15 +120649,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1284 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1284.size); - String _key1285; - String _val1286; - for (int _i1287 = 0; _i1287 < _map1284.size; ++_i1287) + org.apache.thrift.protocol.TMap _map1324 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1324.size); + String _key1325; + String _val1326; + for (int _i1327 = 0; _i1327 < _map1324.size; ++_i1327) { - _key1285 = iprot.readString(); - _val1286 = iprot.readString(); - struct.part_vals.put(_key1285, _val1286); + _key1325 = iprot.readString(); + _val1326 = iprot.readString(); + struct.part_vals.put(_key1325, _val1326); } iprot.readMapEnd(); } @@ -120381,10 +120701,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1288 : struct.part_vals.entrySet()) + for (Map.Entry _iter1328 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1288.getKey()); - oprot.writeString(_iter1288.getValue()); + oprot.writeString(_iter1328.getKey()); + 
oprot.writeString(_iter1328.getValue()); } oprot.writeMapEnd(); } @@ -120435,10 +120755,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1289 : struct.part_vals.entrySet()) + for (Map.Entry _iter1329 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1289.getKey()); - oprot.writeString(_iter1289.getValue()); + oprot.writeString(_iter1329.getKey()); + oprot.writeString(_iter1329.getValue()); } } } @@ -120461,15 +120781,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1290 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1290.size); - String _key1291; - String _val1292; - for (int _i1293 = 0; _i1293 < _map1290.size; ++_i1293) + org.apache.thrift.protocol.TMap _map1330 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1330.size); + String _key1331; + String _val1332; + for (int _i1333 = 0; _i1333 < _map1330.size; ++_i1333) { - _key1291 = iprot.readString(); - _val1292 = iprot.readString(); - struct.part_vals.put(_key1291, _val1292); + _key1331 = iprot.readString(); + _val1332 = iprot.readString(); + struct.part_vals.put(_key1331, _val1332); } } struct.setPart_valsIsSet(true); @@ -121953,15 +122273,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1294 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1294.size); - String _key1295; - String _val1296; - for (int _i1297 = 0; _i1297 < _map1294.size; ++_i1297) + 
org.apache.thrift.protocol.TMap _map1334 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1334.size); + String _key1335; + String _val1336; + for (int _i1337 = 0; _i1337 < _map1334.size; ++_i1337) { - _key1295 = iprot.readString(); - _val1296 = iprot.readString(); - struct.part_vals.put(_key1295, _val1296); + _key1335 = iprot.readString(); + _val1336 = iprot.readString(); + struct.part_vals.put(_key1335, _val1336); } iprot.readMapEnd(); } @@ -122005,10 +122325,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1298 : struct.part_vals.entrySet()) + for (Map.Entry _iter1338 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1298.getKey()); - oprot.writeString(_iter1298.getValue()); + oprot.writeString(_iter1338.getKey()); + oprot.writeString(_iter1338.getValue()); } oprot.writeMapEnd(); } @@ -122059,10 +122379,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1299 : struct.part_vals.entrySet()) + for (Map.Entry _iter1339 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1299.getKey()); - oprot.writeString(_iter1299.getValue()); + oprot.writeString(_iter1339.getKey()); + oprot.writeString(_iter1339.getValue()); } } } @@ -122085,15 +122405,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1300 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1300.size); - String _key1301; - String _val1302; - for (int _i1303 = 0; _i1303 
< _map1300.size; ++_i1303) + org.apache.thrift.protocol.TMap _map1340 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1340.size); + String _key1341; + String _val1342; + for (int _i1343 = 0; _i1343 < _map1340.size; ++_i1343) { - _key1301 = iprot.readString(); - _val1302 = iprot.readString(); - struct.part_vals.put(_key1301, _val1302); + _key1341 = iprot.readString(); + _val1342 = iprot.readString(); + struct.part_vals.put(_key1341, _val1342); } } struct.setPart_valsIsSet(true); @@ -128817,14 +129137,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); - struct.success = new ArrayList(_list1304.size); - Index _elem1305; - for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.success = new ArrayList(_list1344.size); + Index _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1305 = new Index(); - _elem1305.read(iprot); - struct.success.add(_elem1305); + _elem1345 = new Index(); + _elem1345.read(iprot); + struct.success.add(_elem1345); } iprot.readListEnd(); } @@ -128868,9 +129188,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter1307 : struct.success) + for (Index _iter1347 : struct.success) { - _iter1307.write(oprot); + _iter1347.write(oprot); } oprot.writeListEnd(); } @@ -128917,9 +129237,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (Index _iter1308 : struct.success) + for (Index _iter1348 : struct.success) { - _iter1308.write(oprot); + _iter1348.write(oprot); } } } @@ -128937,14 +129257,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1309.size); - Index _elem1310; - for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1349.size); + Index _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1310 = new Index(); - _elem1310.read(iprot); - struct.success.add(_elem1310); + _elem1350 = new Index(); + _elem1350.read(iprot); + struct.success.add(_elem1350); } } struct.setSuccessIsSet(true); @@ -129923,13 +130243,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); - struct.success = new ArrayList(_list1312.size); - String _elem1313; - for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.success = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1313 = iprot.readString(); - struct.success.add(_elem1313); + _elem1353 = iprot.readString(); + struct.success.add(_elem1353); } iprot.readListEnd(); } @@ -129964,9 +130284,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1315 : struct.success) + for (String _iter1355 : struct.success) { - oprot.writeString(_iter1315); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -130005,9 +130325,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1316 : struct.success) + for (String _iter1356 : struct.success) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1356); } } } @@ -130022,13 +130342,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1317.size); - String _elem1318; - for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1318 = iprot.readString(); - struct.success.add(_elem1318); + _elem1358 = iprot.readString(); + struct.success.add(_elem1358); } } struct.setSuccessIsSet(true); @@ -149515,13 +149835,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); - struct.success = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList 
_list1360 = iprot.readListBegin(); + struct.success = new ArrayList(_list1360.size); + String _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1321 = iprot.readString(); - struct.success.add(_elem1321); + _elem1361 = iprot.readString(); + struct.success.add(_elem1361); } iprot.readListEnd(); } @@ -149556,9 +149876,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1323 : struct.success) + for (String _iter1363 : struct.success) { - oprot.writeString(_iter1323); + oprot.writeString(_iter1363); } oprot.writeListEnd(); } @@ -149597,9 +149917,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1324 : struct.success) + for (String _iter1364 : struct.success) { - oprot.writeString(_iter1324); + oprot.writeString(_iter1364); } } } @@ -149614,13 +149934,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1325.size); - String _elem1326; - for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) + org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1365.size); + String _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1326 = iprot.readString(); - struct.success.add(_elem1326); + _elem1366 = iprot.readString(); + struct.success.add(_elem1366); } } struct.setSuccessIsSet(true); @@ 
-153675,13 +153995,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); - struct.success = new ArrayList(_list1328.size); - String _elem1329; - for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + String _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1329 = iprot.readString(); - struct.success.add(_elem1329); + _elem1369 = iprot.readString(); + struct.success.add(_elem1369); } iprot.readListEnd(); } @@ -153716,9 +154036,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1331 : struct.success) + for (String _iter1371 : struct.success) { - oprot.writeString(_iter1331); + oprot.writeString(_iter1371); } oprot.writeListEnd(); } @@ -153757,9 +154077,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1332 : struct.success) + for (String _iter1372 : struct.success) { - oprot.writeString(_iter1332); + oprot.writeString(_iter1372); } } } @@ -153774,13 +154094,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1333.size); - String _elem1334; - for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) + 
org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + String _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1334 = iprot.readString(); - struct.success.add(_elem1334); + _elem1374 = iprot.readString(); + struct.success.add(_elem1374); } } struct.setSuccessIsSet(true); @@ -157071,14 +157391,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); - struct.success = new ArrayList(_list1336.size); - Role _elem1337; - for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) + org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); + struct.success = new ArrayList(_list1376.size); + Role _elem1377; + for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1337 = new Role(); - _elem1337.read(iprot); - struct.success.add(_elem1337); + _elem1377 = new Role(); + _elem1377.read(iprot); + struct.success.add(_elem1377); } iprot.readListEnd(); } @@ -157113,9 +157433,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1339 : struct.success) + for (Role _iter1379 : struct.success) { - _iter1339.write(oprot); + _iter1379.write(oprot); } oprot.writeListEnd(); } @@ -157154,9 +157474,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1340 : struct.success) + for (Role _iter1380 : struct.success) { - _iter1340.write(oprot); + _iter1380.write(oprot); } } } @@ -157171,14 +157491,14 
@@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1341.size); - Role _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1381.size); + Role _elem1382; + for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) { - _elem1342 = new Role(); - _elem1342.read(iprot); - struct.success.add(_elem1342); + _elem1382 = new Role(); + _elem1382.read(iprot); + struct.success.add(_elem1382); } } struct.setSuccessIsSet(true); @@ -160183,13 +160503,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1344.size); - String _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1384.size); + String _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1345 = iprot.readString(); - struct.group_names.add(_elem1345); + _elem1385 = iprot.readString(); + struct.group_names.add(_elem1385); } iprot.readListEnd(); } @@ -160225,9 +160545,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1347 : struct.group_names) + for 
(String _iter1387 : struct.group_names) { - oprot.writeString(_iter1347); + oprot.writeString(_iter1387); } oprot.writeListEnd(); } @@ -160270,9 +160590,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1348 : struct.group_names) + for (String _iter1388 : struct.group_names) { - oprot.writeString(_iter1348); + oprot.writeString(_iter1388); } } } @@ -160293,13 +160613,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1349.size); - String _elem1350; - for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) + org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1389.size); + String _elem1390; + for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) { - _elem1350 = iprot.readString(); - struct.group_names.add(_elem1350); + _elem1390 = iprot.readString(); + struct.group_names.add(_elem1390); } } struct.setGroup_namesIsSet(true); @@ -161757,14 +162077,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); - struct.success = new ArrayList(_list1352.size); - HiveObjectPrivilege _elem1353; - for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) + org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); + struct.success = new ArrayList(_list1392.size); + HiveObjectPrivilege _elem1393; + for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) { - _elem1353 = new 
HiveObjectPrivilege(); - _elem1353.read(iprot); - struct.success.add(_elem1353); + _elem1393 = new HiveObjectPrivilege(); + _elem1393.read(iprot); + struct.success.add(_elem1393); } iprot.readListEnd(); } @@ -161799,9 +162119,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1355 : struct.success) + for (HiveObjectPrivilege _iter1395 : struct.success) { - _iter1355.write(oprot); + _iter1395.write(oprot); } oprot.writeListEnd(); } @@ -161840,9 +162160,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1356 : struct.success) + for (HiveObjectPrivilege _iter1396 : struct.success) { - _iter1356.write(oprot); + _iter1396.write(oprot); } } } @@ -161857,14 +162177,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1357.size); - HiveObjectPrivilege _elem1358; - for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) + org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1397.size); + HiveObjectPrivilege _elem1398; + for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) { - _elem1358 = new HiveObjectPrivilege(); - _elem1358.read(iprot); - struct.success.add(_elem1358); + _elem1398 = new HiveObjectPrivilege(); + _elem1398.read(iprot); + struct.success.add(_elem1398); } } struct.setSuccessIsSet(true); @@ 
-164766,13 +165086,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1360.size); - String _elem1361; - for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) + org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1400.size); + String _elem1401; + for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) { - _elem1361 = iprot.readString(); - struct.group_names.add(_elem1361); + _elem1401 = iprot.readString(); + struct.group_names.add(_elem1401); } iprot.readListEnd(); } @@ -164803,9 +165123,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1363 : struct.group_names) + for (String _iter1403 : struct.group_names) { - oprot.writeString(_iter1363); + oprot.writeString(_iter1403); } oprot.writeListEnd(); } @@ -164842,9 +165162,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1364 : struct.group_names) + for (String _iter1404 : struct.group_names) { - oprot.writeString(_iter1364); + oprot.writeString(_iter1404); } } } @@ -164860,13 +165180,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1365.size); - String _elem1366; - for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) + 
org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1405.size); + String _elem1406; + for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) { - _elem1366 = iprot.readString(); - struct.group_names.add(_elem1366); + _elem1406 = iprot.readString(); + struct.group_names.add(_elem1406); } } struct.setGroup_namesIsSet(true); @@ -165269,13 +165589,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); - struct.success = new ArrayList(_list1368.size); - String _elem1369; - for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) + org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); + struct.success = new ArrayList(_list1408.size); + String _elem1409; + for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) { - _elem1369 = iprot.readString(); - struct.success.add(_elem1369); + _elem1409 = iprot.readString(); + struct.success.add(_elem1409); } iprot.readListEnd(); } @@ -165310,9 +165630,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1371 : struct.success) + for (String _iter1411 : struct.success) { - oprot.writeString(_iter1371); + oprot.writeString(_iter1411); } oprot.writeListEnd(); } @@ -165351,9 +165671,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1372 : struct.success) + for (String _iter1412 : struct.success) { - oprot.writeString(_iter1372); + oprot.writeString(_iter1412); } } } @@ 
-165368,13 +165688,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1373.size); - String _elem1374; - for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) + org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1413.size); + String _elem1414; + for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) { - _elem1374 = iprot.readString(); - struct.success.add(_elem1374); + _elem1414 = iprot.readString(); + struct.success.add(_elem1414); } } struct.setSuccessIsSet(true); @@ -170665,13 +170985,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); - struct.success = new ArrayList(_list1376.size); - String _elem1377; - for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) + org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); + struct.success = new ArrayList(_list1416.size); + String _elem1417; + for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) { - _elem1377 = iprot.readString(); - struct.success.add(_elem1377); + _elem1417 = iprot.readString(); + struct.success.add(_elem1417); } iprot.readListEnd(); } @@ -170697,9 +171017,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1379 : struct.success) + for (String _iter1419 : struct.success) { - 
oprot.writeString(_iter1379); + oprot.writeString(_iter1419); } oprot.writeListEnd(); } @@ -170730,9 +171050,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1380 : struct.success) + for (String _iter1420 : struct.success) { - oprot.writeString(_iter1380); + oprot.writeString(_iter1420); } } } @@ -170744,13 +171064,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1381.size); - String _elem1382; - for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) + org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1421.size); + String _elem1422; + for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) { - _elem1382 = iprot.readString(); - struct.success.add(_elem1382); + _elem1422 = iprot.readString(); + struct.success.add(_elem1422); } } struct.setSuccessIsSet(true); @@ -173037,21 +173357,1276 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof remove_master_key_result) - return this.equals((remove_master_key_result)that); + if (that instanceof remove_master_key_result) + return this.equals((remove_master_key_result)that); + return false; + } + + public boolean equals(remove_master_key_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != 
that.success) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(remove_master_key_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("remove_master_key_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void 
readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class remove_master_key_resultStandardSchemeFactory implements SchemeFactory { + public remove_master_key_resultStandardScheme getScheme() { + return new remove_master_key_resultStandardScheme(); + } + } + + private static class remove_master_key_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
remove_master_key_resultTupleSchemeFactory implements SchemeFactory { + public remove_master_key_resultTupleScheme getScheme() { + return new remove_master_key_resultTupleScheme(); + } + } + + private static class remove_master_key_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_master_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_master_keys_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_args.class, metaDataMap); + } + + public get_master_keys_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_master_keys_args(get_master_keys_args other) { + } + + public get_master_keys_args deepCopy() { + return new get_master_keys_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_master_keys_args) + return this.equals((get_master_keys_args)that); + return false; + } + + public boolean equals(get_master_keys_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_master_keys_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_master_keys_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws 
org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_master_keys_argsStandardSchemeFactory implements SchemeFactory { + public get_master_keys_argsStandardScheme getScheme() { + return new get_master_keys_argsStandardScheme(); + } + } + + private static class get_master_keys_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_master_keys_argsTupleSchemeFactory implements SchemeFactory { + public get_master_keys_argsTupleScheme getScheme() { + return new 
get_master_keys_argsTupleScheme(); + } + } + + private static class get_master_keys_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_master_keys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_master_keys_resultTupleSchemeFactory()); + } + + private List success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_result.class, metaDataMap); + } + + public get_master_keys_result() { + } + + public get_master_keys_result( + List success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_master_keys_result(get_master_keys_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success); + this.success = __this__success; + } + } + + public get_master_keys_result deepCopy() { + return new get_master_keys_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_master_keys_result) + return 
this.equals((get_master_keys_result)that); + return false; + } + + public boolean equals(get_master_keys_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_master_keys_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_master_keys_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + 
return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_master_keys_resultStandardSchemeFactory implements SchemeFactory { + public get_master_keys_resultStandardScheme getScheme() { + return new get_master_keys_resultStandardScheme(); + } + } + + private static class get_master_keys_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); + struct.success = new ArrayList(_list1424.size); + String _elem1425; + for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) + { + _elem1425 = iprot.readString(); + struct.success.add(_elem1425); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + 
default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1427 : struct.success) + { + oprot.writeString(_iter1427); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_master_keys_resultTupleSchemeFactory implements SchemeFactory { + public get_master_keys_resultTupleScheme getScheme() { + return new get_master_keys_resultTupleScheme(); + } + } + + private static class get_master_keys_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (String _iter1428 : struct.success) + { + oprot.writeString(_iter1428); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1429.size); + String _elem1430; + for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) + { + _elem1430 = iprot.readString(); + struct.success.add(_elem1430); + } + } + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_open_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_args.class, metaDataMap); + } + + public get_open_txns_args() { + } + + /** + * Performs a deep copy on other. + */ + public get_open_txns_args(get_open_txns_args other) { + } + + public get_open_txns_args deepCopy() { + return new get_open_txns_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_args) + return this.equals((get_open_txns_args)that); + return false; + } + + public boolean equals(get_open_txns_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int 
compareTo(get_open_txns_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_open_txns_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_open_txns_argsStandardSchemeFactory implements SchemeFactory { + public get_open_txns_argsStandardScheme getScheme() { + return new get_open_txns_argsStandardScheme(); + } + } + + private static class get_open_txns_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct) throws org.apache.thrift.TException { + 
org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_open_txns_argsTupleSchemeFactory implements SchemeFactory { + public get_open_txns_argsTupleScheme getScheme() { + return new get_open_txns_argsTupleScheme(); + } + } + + private static class get_open_txns_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + 
schemes.put(StandardScheme.class, new get_open_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_resultTupleSchemeFactory()); + } + + private GetOpenTxnsResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_result.class, metaDataMap); + } + + public get_open_txns_result() { + } + + public get_open_txns_result( + GetOpenTxnsResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_open_txns_result(get_open_txns_result other) { + if (other.isSetSuccess()) { + this.success = new GetOpenTxnsResponse(other.success); + } + } + + public get_open_txns_result deepCopy() { + return new get_open_txns_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public GetOpenTxnsResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetOpenTxnsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetOpenTxnsResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_result) + return this.equals((get_open_txns_result)that); return false; } - public boolean equals(remove_master_key_result that) { + public boolean equals(get_open_txns_result that) { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean 
that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -173062,7 +174637,7 @@ public boolean equals(remove_master_key_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true; + boolean present_success = true && (isSetSuccess()); list.add(present_success); if (present_success) list.add(success); @@ -173071,7 +174646,7 @@ public int hashCode() { } @Override - public int compareTo(remove_master_key_result other) { + public int compareTo(get_open_txns_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173105,11 +174680,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("remove_master_key_result("); + StringBuilder sb = new StringBuilder("get_open_txns_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; sb.append(")"); return sb.toString(); @@ -173118,6 +174697,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173130,23 +174712,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default 
constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class remove_master_key_resultStandardSchemeFactory implements SchemeFactory { - public remove_master_key_resultStandardScheme getScheme() { - return new remove_master_key_resultStandardScheme(); + private static class get_open_txns_resultStandardSchemeFactory implements SchemeFactory { + public get_open_txns_resultStandardScheme getScheme() { + return new get_open_txns_resultStandardScheme(); } } - private static class remove_master_key_resultStandardScheme extends StandardScheme { + private static class get_open_txns_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173157,8 +174737,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_r } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenTxnsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -173173,13 +174754,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -173188,16 +174769,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_ } - private static class remove_master_key_resultTupleSchemeFactory implements SchemeFactory { - public remove_master_key_resultTupleScheme getScheme() { - return new remove_master_key_resultTupleScheme(); + private static class get_open_txns_resultTupleSchemeFactory implements SchemeFactory { + public get_open_txns_resultTupleScheme getScheme() { + return new get_open_txns_resultTupleScheme(); } } - private static class remove_master_key_resultTupleScheme extends TupleScheme { + private static class get_open_txns_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -173205,16 +174786,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, remove_master_key_r } oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) 
prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = iprot.readBool(); + struct.success = new GetOpenTxnsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } } @@ -173222,14 +174804,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_re } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_args"); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_master_keys_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_master_keys_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_open_txns_info_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_info_argsTupleSchemeFactory()); } @@ -173292,20 +174874,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_args.class, 
metaDataMap); } - public get_master_keys_args() { + public get_open_txns_info_args() { } /** * Performs a deep copy on other. */ - public get_master_keys_args(get_master_keys_args other) { + public get_open_txns_info_args(get_open_txns_info_args other) { } - public get_master_keys_args deepCopy() { - return new get_master_keys_args(this); + public get_open_txns_info_args deepCopy() { + return new get_open_txns_info_args(this); } @Override @@ -173338,12 +174920,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_master_keys_args) - return this.equals((get_master_keys_args)that); + if (that instanceof get_open_txns_info_args) + return this.equals((get_open_txns_info_args)that); return false; } - public boolean equals(get_master_keys_args that) { + public boolean equals(get_open_txns_info_args that) { if (that == null) return false; @@ -173358,7 +174940,7 @@ public int hashCode() { } @Override - public int compareTo(get_master_keys_args other) { + public int compareTo(get_open_txns_info_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173382,7 +174964,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_master_keys_args("); + StringBuilder sb = new StringBuilder("get_open_txns_info_args("); boolean first = true; sb.append(")"); @@ -173410,15 +174992,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_master_keys_argsStandardSchemeFactory implements SchemeFactory { - public get_master_keys_argsStandardScheme getScheme() { - return new get_master_keys_argsStandardScheme(); + private static class get_open_txns_info_argsStandardSchemeFactory implements SchemeFactory { + public get_open_txns_info_argsStandardScheme getScheme() { + return new get_open_txns_info_argsStandardScheme(); } } - private static class get_master_keys_argsStandardScheme extends StandardScheme { + private static class get_open_txns_info_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173437,7 +175019,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -173447,39 +175029,39 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_ar } - private static class get_master_keys_argsTupleSchemeFactory implements SchemeFactory { - public get_master_keys_argsTupleScheme getScheme() { - return new 
get_master_keys_argsTupleScheme(); + private static class get_open_txns_info_argsTupleSchemeFactory implements SchemeFactory { + public get_open_txns_info_argsTupleScheme getScheme() { + return new get_open_txns_info_argsTupleScheme(); } } - private static class get_master_keys_argsTupleScheme extends TupleScheme { + private static class get_open_txns_info_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + 
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_master_keys_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_master_keys_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_open_txns_info_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_info_resultTupleSchemeFactory()); } - private List success; // required + private GetOpenTxnsInfoResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173544,17 +175126,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsInfoResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_result.class, metaDataMap); } - public get_master_keys_result() { + public get_open_txns_info_result() { } - public get_master_keys_result( - List success) + public get_open_txns_info_result( + 
GetOpenTxnsInfoResponse success) { this(); this.success = success; @@ -173563,15 +175144,14 @@ public get_master_keys_result( /** * Performs a deep copy on other. */ - public get_master_keys_result(get_master_keys_result other) { + public get_open_txns_info_result(get_open_txns_info_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); - this.success = __this__success; + this.success = new GetOpenTxnsInfoResponse(other.success); } } - public get_master_keys_result deepCopy() { - return new get_master_keys_result(this); + public get_open_txns_info_result deepCopy() { + return new get_open_txns_info_result(this); } @Override @@ -173579,26 +175159,11 @@ public void clear() { this.success = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? null : this.success.iterator(); - } - - public void addToSuccess(String elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { + public GetOpenTxnsInfoResponse getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(GetOpenTxnsInfoResponse success) { this.success = success; } @@ -173623,7 +175188,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((GetOpenTxnsInfoResponse)value); } break; @@ -173656,12 +175221,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_master_keys_result) - return this.equals((get_master_keys_result)that); + if (that instanceof get_open_txns_info_result) + return this.equals((get_open_txns_info_result)that); return false; } - public boolean equals(get_master_keys_result that) { + public boolean 
equals(get_open_txns_info_result that) { if (that == null) return false; @@ -173690,7 +175255,7 @@ public int hashCode() { } @Override - public int compareTo(get_master_keys_result other) { + public int compareTo(get_open_txns_info_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173724,7 +175289,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_master_keys_result("); + StringBuilder sb = new StringBuilder("get_open_txns_info_result("); boolean first = true; sb.append("success:"); @@ -173741,6 +175306,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173759,15 +175327,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_master_keys_resultStandardSchemeFactory implements SchemeFactory { - public get_master_keys_resultStandardScheme getScheme() { - return new get_master_keys_resultStandardScheme(); + private static class get_open_txns_info_resultStandardSchemeFactory implements SchemeFactory { + public get_open_txns_info_resultStandardScheme getScheme() { + return new get_open_txns_info_resultStandardScheme(); } } - private static class get_master_keys_resultStandardScheme extends StandardScheme { + private static class get_open_txns_info_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { 
org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173778,18 +175346,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); - struct.success = new ArrayList(_list1384.size); - String _elem1385; - for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) - { - _elem1385 = iprot.readString(); - struct.success.add(_elem1385); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenTxnsInfoResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -173804,20 +175363,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1387 : struct.success) - { - oprot.writeString(_iter1387); - } - oprot.writeListEnd(); - } + struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -173826,16 +175378,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re } - private static class get_master_keys_resultTupleSchemeFactory implements SchemeFactory { - public get_master_keys_resultTupleScheme getScheme() { - return new 
get_master_keys_resultTupleScheme(); + private static class get_open_txns_info_resultTupleSchemeFactory implements SchemeFactory { + public get_open_txns_info_resultTupleScheme getScheme() { + return new get_open_txns_info_resultTupleScheme(); } } - private static class get_master_keys_resultTupleScheme extends TupleScheme { + private static class get_open_txns_info_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -173843,31 +175395,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res } oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (String _iter1388 : struct.success) - { - oprot.writeString(_iter1388); - } - } + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1389.size); - String _elem1390; - for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) - { - _elem1390 = iprot.readString(); - struct.success.add(_elem1390); - } - } + struct.success = new GetOpenTxnsInfoResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } } @@ -173875,20 
+175413,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_args"); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new open_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new open_txns_argsTupleSchemeFactory()); } + private OpenTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -173903,6 +175443,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // RQST + return RQST; default: return null; } @@ -173941,37 +175483,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_args.class, metaDataMap); } - public get_open_txns_args() { + public open_txns_args() { + } + + public open_txns_args( + OpenTxnRequest rqst) + { + this(); + this.rqst = rqst; } /** * Performs a deep copy on other. 
*/ - public get_open_txns_args(get_open_txns_args other) { + public open_txns_args(open_txns_args other) { + if (other.isSetRqst()) { + this.rqst = new OpenTxnRequest(other.rqst); + } } - public get_open_txns_args deepCopy() { - return new get_open_txns_args(this); + public open_txns_args deepCopy() { + return new open_txns_args(this); } @Override public void clear() { + this.rqst = null; + } + + public OpenTxnRequest getRqst() { + return this.rqst; + } + + public void setRqst(OpenTxnRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((OpenTxnRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case RQST: + return getRqst(); + } throw new IllegalStateException(); } @@ -173983,6 +175574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -173991,15 +175584,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_args) - return this.equals((get_open_txns_args)that); + if (that instanceof open_txns_args) + return this.equals((open_txns_args)that); return false; } - public boolean equals(get_open_txns_args that) { + public boolean equals(open_txns_args that) { if (that == null) return false; + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if 
(!this.rqst.equals(that.rqst)) + return false; + } + return true; } @@ -174007,17 +175609,32 @@ public boolean equals(get_open_txns_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + return list.hashCode(); } @Override - public int compareTo(get_open_txns_args other) { + public int compareTo(open_txns_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -174035,9 +175652,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_args("); + StringBuilder sb = new StringBuilder("open_txns_args("); boolean first = true; + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; sb.append(")"); return sb.toString(); } @@ -174045,6 +175669,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174063,15 +175690,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_argsStandardSchemeFactory implements SchemeFactory { - public get_open_txns_argsStandardScheme getScheme() { - return new get_open_txns_argsStandardScheme(); + private static class open_txns_argsStandardSchemeFactory implements SchemeFactory { + public open_txns_argsStandardScheme getScheme() { + return new open_txns_argsStandardScheme(); } } - private static class get_open_txns_argsStandardScheme extends StandardScheme { + private static class open_txns_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174081,6 +175708,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args break; } switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new OpenTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -174090,49 +175726,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_open_txns_argsTupleSchemeFactory implements SchemeFactory { - public get_open_txns_argsTupleScheme getScheme() { - return new get_open_txns_argsTupleScheme(); + private static class open_txns_argsTupleSchemeFactory implements SchemeFactory { + public open_txns_argsTupleScheme getScheme() { + return new open_txns_argsTupleScheme(); } } - private static class get_open_txns_argsTupleScheme extends TupleScheme { + private static class open_txns_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
open_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new OpenTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new open_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new open_txns_resultTupleSchemeFactory()); } - private GetOpenTxnsResponse success; // required + private OpenTxnsResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174197,16 +175852,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnsResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_result.class, metaDataMap); } - public get_open_txns_result() { + public open_txns_result() { } - public get_open_txns_result( - GetOpenTxnsResponse success) + public open_txns_result( + OpenTxnsResponse success) { this(); this.success = success; @@ -174215,14 +175870,14 @@ public get_open_txns_result( /** * Performs a deep copy on other. 
*/ - public get_open_txns_result(get_open_txns_result other) { + public open_txns_result(open_txns_result other) { if (other.isSetSuccess()) { - this.success = new GetOpenTxnsResponse(other.success); + this.success = new OpenTxnsResponse(other.success); } } - public get_open_txns_result deepCopy() { - return new get_open_txns_result(this); + public open_txns_result deepCopy() { + return new open_txns_result(this); } @Override @@ -174230,11 +175885,11 @@ public void clear() { this.success = null; } - public GetOpenTxnsResponse getSuccess() { + public OpenTxnsResponse getSuccess() { return this.success; } - public void setSuccess(GetOpenTxnsResponse success) { + public void setSuccess(OpenTxnsResponse success) { this.success = success; } @@ -174259,7 +175914,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetOpenTxnsResponse)value); + setSuccess((OpenTxnsResponse)value); } break; @@ -174292,12 +175947,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_result) - return this.equals((get_open_txns_result)that); + if (that instanceof open_txns_result) + return this.equals((open_txns_result)that); return false; } - public boolean equals(get_open_txns_result that) { + public boolean equals(open_txns_result that) { if (that == null) return false; @@ -174326,7 +175981,7 @@ public int hashCode() { } @Override - public int compareTo(get_open_txns_result other) { + public int compareTo(open_txns_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174360,7 +176015,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_result("); + StringBuilder sb = new StringBuilder("open_txns_result("); boolean first = true; sb.append("success:"); @@ -174398,15 +176053,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_resultStandardSchemeFactory implements SchemeFactory { - public get_open_txns_resultStandardScheme getScheme() { - return new get_open_txns_resultStandardScheme(); + private static class open_txns_resultStandardSchemeFactory implements SchemeFactory { + public open_txns_resultStandardScheme getScheme() { + return new open_txns_resultStandardScheme(); } } - private static class get_open_txns_resultStandardScheme extends StandardScheme { + private static class open_txns_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174418,7 +176073,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_resul switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetOpenTxnsResponse(); + struct.success = new OpenTxnsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174434,7 +176089,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_resul struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_result struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); @@ -174449,16 +176104,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_resu } - private static class get_open_txns_resultTupleSchemeFactory implements SchemeFactory { - public get_open_txns_resultTupleScheme getScheme() { - return new get_open_txns_resultTupleScheme(); + private static class open_txns_resultTupleSchemeFactory implements SchemeFactory { + public open_txns_resultTupleScheme getScheme() { + return new open_txns_resultTupleScheme(); } } - private static class get_open_txns_resultTupleScheme extends TupleScheme { + private static class open_txns_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174471,11 +176126,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_resul } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetOpenTxnsResponse(); + struct.success = new OpenTxnsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174484,20 +176139,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_args implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_args"); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_info_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_info_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txn_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txn_argsTupleSchemeFactory()); } + private AbortTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -174512,6 +176169,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // RQST + return RQST; default: return null; } @@ -174550,37 +176209,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_args.class, metaDataMap); } - public get_open_txns_info_args() { + public abort_txn_args() { + } + + public abort_txn_args( + AbortTxnRequest rqst) + { + this(); + this.rqst = rqst; } /** * Performs a deep copy on other. 
*/ - public get_open_txns_info_args(get_open_txns_info_args other) { + public abort_txn_args(abort_txn_args other) { + if (other.isSetRqst()) { + this.rqst = new AbortTxnRequest(other.rqst); + } } - public get_open_txns_info_args deepCopy() { - return new get_open_txns_info_args(this); + public abort_txn_args deepCopy() { + return new abort_txn_args(this); } @Override public void clear() { + this.rqst = null; + } + + public AbortTxnRequest getRqst() { + return this.rqst; + } + + public void setRqst(AbortTxnRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((AbortTxnRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case RQST: + return getRqst(); + } throw new IllegalStateException(); } @@ -174592,6 +176300,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -174600,15 +176310,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_info_args) - return this.equals((get_open_txns_info_args)that); + if (that instanceof abort_txn_args) + return this.equals((abort_txn_args)that); return false; } - public boolean equals(get_open_txns_info_args that) { + public boolean equals(abort_txn_args that) { if (that == null) return false; + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && 
that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + return true; } @@ -174616,17 +176335,32 @@ public boolean equals(get_open_txns_info_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + return list.hashCode(); } @Override - public int compareTo(get_open_txns_info_args other) { + public int compareTo(abort_txn_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -174644,9 +176378,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_info_args("); + StringBuilder sb = new StringBuilder("abort_txn_args("); boolean first = true; + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; sb.append(")"); return sb.toString(); } @@ -174654,6 +176395,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174672,15 +176416,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_info_argsStandardSchemeFactory implements SchemeFactory { - public get_open_txns_info_argsStandardScheme getScheme() { - return new get_open_txns_info_argsStandardScheme(); + private static class abort_txn_argsStandardSchemeFactory implements SchemeFactory { + public abort_txn_argsStandardScheme getScheme() { + return new abort_txn_argsStandardScheme(); } } - private static class get_open_txns_info_argsStandardScheme extends StandardScheme { + private static class abort_txn_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174690,6 +176434,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ break; } switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new AbortTxnRequest(); + struct.rqst.read(iprot); + 
struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -174699,53 +176452,72 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_open_txns_info_argsTupleSchemeFactory implements SchemeFactory { - public get_open_txns_info_argsTupleScheme getScheme() { - return new get_open_txns_info_argsTupleScheme(); + private static class abort_txn_argsTupleSchemeFactory implements SchemeFactory { + public abort_txn_argsTupleScheme getScheme() { + return new abort_txn_argsTupleScheme(); } } - private static class get_open_txns_info_argsTupleScheme extends TupleScheme { + private static class abort_txn_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException 
{ + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new AbortTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_info_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_info_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txn_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txn_resultTupleSchemeFactory()); } - private GetOpenTxnsInfoResponse success; // required + private NoSuchTxnException o1; // required /** The set of 
fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -174760,8 +176532,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_a */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -174805,70 +176577,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsInfoResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_result.class, metaDataMap); } - public get_open_txns_info_result() { + public abort_txn_result() { } - public get_open_txns_info_result( - GetOpenTxnsInfoResponse success) + public abort_txn_result( + NoSuchTxnException o1) { this(); - this.success = success; + this.o1 = o1; } /** * Performs a deep copy on other. 
*/ - public get_open_txns_info_result(get_open_txns_info_result other) { - if (other.isSetSuccess()) { - this.success = new GetOpenTxnsInfoResponse(other.success); + public abort_txn_result(abort_txn_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); } } - public get_open_txns_info_result deepCopy() { - return new get_open_txns_info_result(this); + public abort_txn_result deepCopy() { + return new abort_txn_result(this); } @Override public void clear() { - this.success = null; + this.o1 = null; } - public GetOpenTxnsInfoResponse getSuccess() { - return this.success; + public NoSuchTxnException getO1() { + return this.o1; } - public void setSuccess(GetOpenTxnsInfoResponse success) { - this.success = success; + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.success = null; + public void unsetO1() { + this.o1 = null; } - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; } - public void setSuccessIsSet(boolean value) { + public void setO1IsSet(boolean value) { if (!value) { - this.success = null; + this.o1 = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((GetOpenTxnsInfoResponse)value); + setO1((NoSuchTxnException)value); } break; @@ -174877,8 +176649,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); + case O1: + return getO1(); } throw new IllegalStateException(); @@ -174891,8 +176663,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); + 
case O1: + return isSetO1(); } throw new IllegalStateException(); } @@ -174901,21 +176673,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_info_result) - return this.equals((get_open_txns_info_result)that); + if (that instanceof abort_txn_result) + return this.equals((abort_txn_result)that); return false; } - public boolean equals(get_open_txns_info_result that) { + public boolean equals(abort_txn_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (!this.success.equals(that.success)) + if (!this.o1.equals(that.o1)) return false; } @@ -174926,28 +176698,28 @@ public boolean equals(get_open_txns_info_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); return list.hashCode(); } @Override - public int compareTo(get_open_txns_info_result other) { + public int compareTo(abort_txn_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; } - if (isSetSuccess()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); if (lastComparison != 0) { return lastComparison; } @@ -174969,14 +176741,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_info_result("); + StringBuilder sb = new StringBuilder("abort_txn_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); } first = false; sb.append(")"); @@ -174986,9 +176758,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -175007,15 +176776,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_info_resultStandardSchemeFactory implements SchemeFactory { - public get_open_txns_info_resultStandardScheme getScheme() { - return new get_open_txns_info_resultStandardScheme(); + private static class abort_txn_resultStandardSchemeFactory implements SchemeFactory { + public abort_txn_resultStandardScheme getScheme() { + return new abort_txn_resultStandardScheme(); } } - private static class get_open_txns_info_resultStandardScheme extends StandardScheme { + private static class abort_txn_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result struct) throws org.apache.thrift.TException { 
org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175025,11 +176794,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ break; } switch (schemeField.id) { - case 0: // SUCCESS + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetOpenTxnsInfoResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -175043,13 +176812,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -175058,53 +176827,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info } - private static class get_open_txns_info_resultTupleSchemeFactory implements SchemeFactory { - public get_open_txns_info_resultTupleScheme getScheme() { - return new get_open_txns_info_resultTupleScheme(); + private static class abort_txn_resultTupleSchemeFactory implements SchemeFactory { + public abort_txn_resultTupleScheme getScheme() { + return new abort_txn_resultTupleScheme(); } } - private static class get_open_txns_info_resultTupleScheme extends TupleScheme { + private static class abort_txn_resultTupleScheme extends TupleScheme { @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); + if (struct.isSetO1()) { + struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetOpenTxnsInfoResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new open_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new open_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txns_argsTupleSchemeFactory()); } - private OpenTxnRequest rqst; // required + private AbortTxnsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175169,16 +176938,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_args.class, metaDataMap); } - public open_txns_args() { + public abort_txns_args() { } - public open_txns_args( - OpenTxnRequest rqst) + public abort_txns_args( + AbortTxnsRequest rqst) { this(); this.rqst = rqst; @@ -175187,14 +176956,14 @@ public open_txns_args( /** * Performs a deep copy on other. 
*/ - public open_txns_args(open_txns_args other) { + public abort_txns_args(abort_txns_args other) { if (other.isSetRqst()) { - this.rqst = new OpenTxnRequest(other.rqst); + this.rqst = new AbortTxnsRequest(other.rqst); } } - public open_txns_args deepCopy() { - return new open_txns_args(this); + public abort_txns_args deepCopy() { + return new abort_txns_args(this); } @Override @@ -175202,11 +176971,11 @@ public void clear() { this.rqst = null; } - public OpenTxnRequest getRqst() { + public AbortTxnsRequest getRqst() { return this.rqst; } - public void setRqst(OpenTxnRequest rqst) { + public void setRqst(AbortTxnsRequest rqst) { this.rqst = rqst; } @@ -175231,7 +177000,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((OpenTxnRequest)value); + setRqst((AbortTxnsRequest)value); } break; @@ -175264,12 +177033,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof open_txns_args) - return this.equals((open_txns_args)that); + if (that instanceof abort_txns_args) + return this.equals((abort_txns_args)that); return false; } - public boolean equals(open_txns_args that) { + public boolean equals(abort_txns_args that) { if (that == null) return false; @@ -175298,7 +177067,7 @@ public int hashCode() { } @Override - public int compareTo(open_txns_args other) { + public int compareTo(abort_txns_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175332,7 +177101,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("open_txns_args("); + StringBuilder sb = new StringBuilder("abort_txns_args("); boolean first = true; sb.append("rqst:"); @@ -175370,15 +177139,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class open_txns_argsStandardSchemeFactory implements SchemeFactory { - public open_txns_argsStandardScheme getScheme() { - return new open_txns_argsStandardScheme(); + private static class abort_txns_argsStandardSchemeFactory implements SchemeFactory { + public abort_txns_argsStandardScheme getScheme() { + return new abort_txns_argsStandardScheme(); } } - private static class open_txns_argsStandardScheme extends StandardScheme { + private static class abort_txns_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175390,7 +177159,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args stru switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new OpenTxnRequest(); + struct.rqst = new AbortTxnsRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -175406,7 +177175,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175421,16 +177190,16 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args str } - private static class open_txns_argsTupleSchemeFactory implements SchemeFactory { - public open_txns_argsTupleScheme getScheme() { - return new open_txns_argsTupleScheme(); + private static class abort_txns_argsTupleSchemeFactory implements SchemeFactory { + public abort_txns_argsTupleScheme getScheme() { + return new abort_txns_argsTupleScheme(); } } - private static class open_txns_argsTupleScheme extends TupleScheme { + private static class abort_txns_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -175443,11 +177212,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args stru } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new OpenTxnRequest(); + struct.rqst = new AbortTxnsRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -175456,22 +177225,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struc } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("open_txns_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new open_txns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new open_txns_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txns_resultTupleSchemeFactory()); } - private OpenTxnsResponse success; // required + private NoSuchTxnException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -175486,8 +177255,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struc */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -175531,70 +177300,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnsResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_result.class, metaDataMap); } - public open_txns_result() { + public abort_txns_result() { } - public open_txns_result( - OpenTxnsResponse success) + public abort_txns_result( + NoSuchTxnException o1) { this(); - this.success = success; + this.o1 = o1; } /** * Performs a deep copy on other. 
*/ - public open_txns_result(open_txns_result other) { - if (other.isSetSuccess()) { - this.success = new OpenTxnsResponse(other.success); + public abort_txns_result(abort_txns_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); } } - public open_txns_result deepCopy() { - return new open_txns_result(this); + public abort_txns_result deepCopy() { + return new abort_txns_result(this); } @Override public void clear() { - this.success = null; + this.o1 = null; } - public OpenTxnsResponse getSuccess() { - return this.success; + public NoSuchTxnException getO1() { + return this.o1; } - public void setSuccess(OpenTxnsResponse success) { - this.success = success; + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.success = null; + public void unsetO1() { + this.o1 = null; } - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; } - public void setSuccessIsSet(boolean value) { + public void setO1IsSet(boolean value) { if (!value) { - this.success = null; + this.o1 = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((OpenTxnsResponse)value); + setO1((NoSuchTxnException)value); } break; @@ -175603,8 +177372,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); + case O1: + return getO1(); } throw new IllegalStateException(); @@ -175617,8 +177386,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); + case O1: + return isSetO1(); } throw new 
IllegalStateException(); } @@ -175627,21 +177396,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof open_txns_result) - return this.equals((open_txns_result)that); + if (that instanceof abort_txns_result) + return this.equals((abort_txns_result)that); return false; } - public boolean equals(open_txns_result that) { + public boolean equals(abort_txns_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (!this.success.equals(that.success)) + if (!this.o1.equals(that.o1)) return false; } @@ -175652,28 +177421,28 @@ public boolean equals(open_txns_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); return list.hashCode(); } @Override - public int compareTo(open_txns_result other) { + public int compareTo(abort_txns_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (isSetO1()) { + 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); if (lastComparison != 0) { return lastComparison; } @@ -175695,14 +177464,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("open_txns_result("); + StringBuilder sb = new StringBuilder("abort_txns_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); } first = false; sb.append(")"); @@ -175712,9 +177481,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -175733,15 +177499,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class open_txns_resultStandardSchemeFactory implements SchemeFactory { - public open_txns_resultStandardScheme getScheme() { - return new open_txns_resultStandardScheme(); + private static class abort_txns_resultStandardSchemeFactory implements SchemeFactory { + public abort_txns_resultStandardScheme getScheme() { + return new abort_txns_resultStandardScheme(); } } - private static class open_txns_resultStandardScheme extends StandardScheme { + private static class abort_txns_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175751,11 +177517,11 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result st break; } switch (schemeField.id) { - case 0: // SUCCESS + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new OpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -175769,13 +177535,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -175784,53 +177550,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_result s } - private static class open_txns_resultTupleSchemeFactory implements SchemeFactory { - public open_txns_resultTupleScheme getScheme() { - return new open_txns_resultTupleScheme(); + private static class abort_txns_resultTupleSchemeFactory implements SchemeFactory { + public abort_txns_resultTupleScheme getScheme() { + return new abort_txns_resultTupleScheme(); } } - private static class open_txns_resultTupleScheme extends TupleScheme { + private static class abort_txns_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol 
prot, abort_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); + if (struct.isSetO1()) { + struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new OpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new 
abort_txn_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txn_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new commit_txn_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new commit_txn_argsTupleSchemeFactory()); } - private AbortTxnRequest rqst; // required + private CommitTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175895,16 +177661,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_args.class, metaDataMap); } - public abort_txn_args() { + public commit_txn_args() { } - public abort_txn_args( - AbortTxnRequest rqst) + public commit_txn_args( + CommitTxnRequest rqst) { this(); this.rqst = rqst; @@ -175913,14 +177679,14 @@ public abort_txn_args( /** * Performs a deep copy on other. 
*/ - public abort_txn_args(abort_txn_args other) { + public commit_txn_args(commit_txn_args other) { if (other.isSetRqst()) { - this.rqst = new AbortTxnRequest(other.rqst); + this.rqst = new CommitTxnRequest(other.rqst); } } - public abort_txn_args deepCopy() { - return new abort_txn_args(this); + public commit_txn_args deepCopy() { + return new commit_txn_args(this); } @Override @@ -175928,11 +177694,11 @@ public void clear() { this.rqst = null; } - public AbortTxnRequest getRqst() { + public CommitTxnRequest getRqst() { return this.rqst; } - public void setRqst(AbortTxnRequest rqst) { + public void setRqst(CommitTxnRequest rqst) { this.rqst = rqst; } @@ -175957,7 +177723,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((AbortTxnRequest)value); + setRqst((CommitTxnRequest)value); } break; @@ -175990,12 +177756,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txn_args) - return this.equals((abort_txn_args)that); + if (that instanceof commit_txn_args) + return this.equals((commit_txn_args)that); return false; } - public boolean equals(abort_txn_args that) { + public boolean equals(commit_txn_args that) { if (that == null) return false; @@ -176024,7 +177790,7 @@ public int hashCode() { } @Override - public int compareTo(abort_txn_args other) { + public int compareTo(commit_txn_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176058,7 +177824,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txn_args("); + StringBuilder sb = new StringBuilder("commit_txn_args("); boolean first = true; sb.append("rqst:"); @@ -176096,15 +177862,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txn_argsStandardSchemeFactory implements SchemeFactory { - public abort_txn_argsStandardScheme getScheme() { - return new abort_txn_argsStandardScheme(); + private static class commit_txn_argsStandardSchemeFactory implements SchemeFactory { + public commit_txn_argsStandardScheme getScheme() { + return new commit_txn_argsStandardScheme(); } } - private static class abort_txn_argsStandardScheme extends StandardScheme { + private static class commit_txn_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176116,7 +177882,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args stru switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AbortTxnRequest(); + struct.rqst = new CommitTxnRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -176132,7 +177898,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -176147,16 +177913,16 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args str } - private static class abort_txn_argsTupleSchemeFactory implements SchemeFactory { - public abort_txn_argsTupleScheme getScheme() { - return new abort_txn_argsTupleScheme(); + private static class commit_txn_argsTupleSchemeFactory implements SchemeFactory { + public commit_txn_argsTupleScheme getScheme() { + return new commit_txn_argsTupleScheme(); } } - private static class abort_txn_argsTupleScheme extends TupleScheme { + private static class commit_txn_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -176169,11 +177935,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args stru } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new AbortTxnRequest(); + struct.rqst = new CommitTxnRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -176182,22 +177948,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struc } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("abort_txn_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txn_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txn_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new commit_txn_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new commit_txn_resultTupleSchemeFactory()); } private NoSuchTxnException o1; // required + private TxnAbortedException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"); + O1((short)1, "o1"), + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -176214,6 +177983,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; + case 2: // O2 + return O2; default: return null; } @@ -176259,36 +178030,44 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_result.class, metaDataMap); } - public abort_txn_result() { + public commit_txn_result() { } - public abort_txn_result( - NoSuchTxnException o1) + public commit_txn_result( + NoSuchTxnException o1, + TxnAbortedException o2) { this(); this.o1 = o1; + this.o2 = o2; } /** * Performs a deep copy on other. 
*/ - public abort_txn_result(abort_txn_result other) { + public commit_txn_result(commit_txn_result other) { if (other.isSetO1()) { this.o1 = new NoSuchTxnException(other.o1); } + if (other.isSetO2()) { + this.o2 = new TxnAbortedException(other.o2); + } } - public abort_txn_result deepCopy() { - return new abort_txn_result(this); + public commit_txn_result deepCopy() { + return new commit_txn_result(this); } @Override public void clear() { this.o1 = null; + this.o2 = null; } public NoSuchTxnException getO1() { @@ -176314,6 +178093,29 @@ public void setO1IsSet(boolean value) { } } + public TxnAbortedException getO2() { + return this.o2; + } + + public void setO2(TxnAbortedException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: @@ -176324,6 +178126,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((TxnAbortedException)value); + } + break; + } } @@ -176332,6 +178142,9 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); + case O2: + return getO2(); + } throw new IllegalStateException(); } @@ -176345,6 +178158,8 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); + case O2: + return isSetO2(); } throw new IllegalStateException(); } @@ -176353,12 +178168,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txn_result) - return this.equals((abort_txn_result)that); + if (that instanceof commit_txn_result) + return this.equals((commit_txn_result)that); return false; } - public boolean 
equals(abort_txn_result that) { + public boolean equals(commit_txn_result that) { if (that == null) return false; @@ -176371,6 +178186,15 @@ public boolean equals(abort_txn_result that) { return false; } + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + return true; } @@ -176383,11 +178207,16 @@ public int hashCode() { if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + return list.hashCode(); } @Override - public int compareTo(abort_txn_result other) { + public int compareTo(commit_txn_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176404,6 +178233,16 @@ public int compareTo(abort_txn_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -176421,7 +178260,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txn_result("); + StringBuilder sb = new StringBuilder("commit_txn_result("); boolean first = true; sb.append("o1:"); @@ -176431,6 +178270,14 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; sb.append(")"); return sb.toString(); } @@ -176456,15 +178303,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txn_resultStandardSchemeFactory implements SchemeFactory { - public abort_txn_resultStandardScheme getScheme() { - return new abort_txn_resultStandardScheme(); + private static class commit_txn_resultStandardSchemeFactory implements SchemeFactory { + public commit_txn_resultStandardScheme getScheme() { + return new commit_txn_resultStandardScheme(); } } - private static class abort_txn_resultStandardScheme extends StandardScheme { + private static class commit_txn_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176483,6 +178330,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -176492,7 +178348,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -176501,59 +178357,75 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_result s struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class abort_txn_resultTupleSchemeFactory implements SchemeFactory { - public abort_txn_resultTupleScheme getScheme() { - return new abort_txn_resultTupleScheme(); + private static class commit_txn_resultTupleSchemeFactory implements SchemeFactory { + public commit_txn_resultTupleScheme getScheme() { + return new commit_txn_resultTupleScheme(); } } - private static class abort_txn_resultTupleScheme extends TupleScheme { + private static class commit_txn_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } } @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(1)) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_write_ids_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_open_write_ids_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_write_ids_argsTupleSchemeFactory()); } - private AbortTxnsRequest rqst; // required + 
private GetOpenWriteIdsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176618,16 +178490,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenWriteIdsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_write_ids_args.class, metaDataMap); } - public abort_txns_args() { + public get_open_write_ids_args() { } - public abort_txns_args( - AbortTxnsRequest rqst) + public get_open_write_ids_args( + GetOpenWriteIdsRequest rqst) { this(); this.rqst = rqst; @@ -176636,14 +178508,14 @@ public abort_txns_args( /** * Performs a deep copy on other. 
*/ - public abort_txns_args(abort_txns_args other) { + public get_open_write_ids_args(get_open_write_ids_args other) { if (other.isSetRqst()) { - this.rqst = new AbortTxnsRequest(other.rqst); + this.rqst = new GetOpenWriteIdsRequest(other.rqst); } } - public abort_txns_args deepCopy() { - return new abort_txns_args(this); + public get_open_write_ids_args deepCopy() { + return new get_open_write_ids_args(this); } @Override @@ -176651,11 +178523,11 @@ public void clear() { this.rqst = null; } - public AbortTxnsRequest getRqst() { + public GetOpenWriteIdsRequest getRqst() { return this.rqst; } - public void setRqst(AbortTxnsRequest rqst) { + public void setRqst(GetOpenWriteIdsRequest rqst) { this.rqst = rqst; } @@ -176680,7 +178552,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((AbortTxnsRequest)value); + setRqst((GetOpenWriteIdsRequest)value); } break; @@ -176713,12 +178585,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txns_args) - return this.equals((abort_txns_args)that); + if (that instanceof get_open_write_ids_args) + return this.equals((get_open_write_ids_args)that); return false; } - public boolean equals(abort_txns_args that) { + public boolean equals(get_open_write_ids_args that) { if (that == null) return false; @@ -176747,7 +178619,7 @@ public int hashCode() { } @Override - public int compareTo(abort_txns_args other) { + public int compareTo(get_open_write_ids_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176781,7 +178653,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txns_args("); + StringBuilder sb = new StringBuilder("get_open_write_ids_args("); boolean first = true; sb.append("rqst:"); @@ -176819,15 +178691,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txns_argsStandardSchemeFactory implements SchemeFactory { - public abort_txns_argsStandardScheme getScheme() { - return new abort_txns_argsStandardScheme(); + private static class get_open_write_ids_argsStandardSchemeFactory implements SchemeFactory { + public get_open_write_ids_argsStandardScheme getScheme() { + return new get_open_write_ids_argsStandardScheme(); } } - private static class abort_txns_argsStandardScheme extends StandardScheme { + private static class get_open_write_ids_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_write_ids_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176839,7 +178711,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args str switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AbortTxnsRequest(); + struct.rqst = new GetOpenWriteIdsRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -176855,7 +178727,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_write_ids_args struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); @@ -176870,16 +178742,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args st } - private static class abort_txns_argsTupleSchemeFactory implements SchemeFactory { - public abort_txns_argsTupleScheme getScheme() { - return new abort_txns_argsTupleScheme(); + private static class get_open_write_ids_argsTupleSchemeFactory implements SchemeFactory { + public get_open_write_ids_argsTupleScheme getScheme() { + return new get_open_write_ids_argsTupleScheme(); } } - private static class abort_txns_argsTupleScheme extends TupleScheme { + private static class get_open_write_ids_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -176892,11 +178764,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args str } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new AbortTxnsRequest(); + struct.rqst = new GetOpenWriteIdsRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -176905,22 +178777,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args stru } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_result implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_write_ids_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txns_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_open_write_ids_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_write_ids_resultTupleSchemeFactory()); } + private GetOpenWriteIdsResponse success; // required private NoSuchTxnException o1; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"); + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -176935,8 +178813,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args stru */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // O1 return O1; + case 2: // O2 + return O2; default: return null; } @@ -176980,38 +178862,77 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenWriteIdsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_write_ids_result.class, metaDataMap); } - public abort_txns_result() { + public get_open_write_ids_result() { } - public abort_txns_result( - NoSuchTxnException o1) + public get_open_write_ids_result( + GetOpenWriteIdsResponse success, + 
NoSuchTxnException o1, + MetaException o2) { this(); + this.success = success; this.o1 = o1; + this.o2 = o2; } /** * Performs a deep copy on other. */ - public abort_txns_result(abort_txns_result other) { + public get_open_write_ids_result(get_open_write_ids_result other) { + if (other.isSetSuccess()) { + this.success = new GetOpenWriteIdsResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchTxnException(other.o1); } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } } - public abort_txns_result deepCopy() { - return new abort_txns_result(this); + public get_open_write_ids_result deepCopy() { + return new get_open_write_ids_result(this); } @Override public void clear() { + this.success = null; this.o1 = null; + this.o2 = null; + } + + public GetOpenWriteIdsResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetOpenWriteIdsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public NoSuchTxnException getO1() { @@ -177037,8 +178958,39 @@ public void setO1IsSet(boolean value) { } } + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetOpenWriteIdsResponse)value); + } + break; + case O1: if (value 
== null) { unsetO1(); @@ -177047,14 +178999,28 @@ public void setFieldValue(_Fields field, Object value) { } break; + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); + case O2: + return getO2(); + } throw new IllegalStateException(); } @@ -177066,8 +179032,12 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); + case O2: + return isSetO2(); } throw new IllegalStateException(); } @@ -177076,15 +179046,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txns_result) - return this.equals((abort_txns_result)that); + if (that instanceof get_open_write_ids_result) + return this.equals((get_open_write_ids_result)that); return false; } - public boolean equals(abort_txns_result that) { + public boolean equals(get_open_write_ids_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -177094,6 +179073,15 @@ public boolean equals(abort_txns_result that) { return false; } + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + return true; } @@ -177101,22 +179089,42 @@ public boolean 
equals(abort_txns_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + return list.hashCode(); } @Override - public int compareTo(abort_txns_result other) { + public int compareTo(get_open_write_ids_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -177127,6 +179135,16 @@ public int compareTo(abort_txns_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -177144,9 +179162,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txns_result("); + StringBuilder sb = new StringBuilder("get_open_write_ids_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -177154,6 +179180,14 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; sb.append(")"); return sb.toString(); } @@ -177161,6 +179195,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -177179,15 +179216,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txns_resultStandardSchemeFactory implements SchemeFactory { - public abort_txns_resultStandardScheme getScheme() { - return new abort_txns_resultStandardScheme(); + private static class get_open_write_ids_resultStandardSchemeFactory implements SchemeFactory { + public get_open_write_ids_resultStandardScheme getScheme() { + return new get_open_write_ids_resultStandardScheme(); } } - private static class abort_txns_resultStandardScheme extends StandardScheme { + private static class get_open_write_ids_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_write_ids_result struct) throws org.apache.thrift.TException { 
org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177197,6 +179234,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenWriteIdsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new NoSuchTxnException(); @@ -177206,6 +179252,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -177215,68 +179270,100 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_write_ids_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + 
oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class abort_txns_resultTupleSchemeFactory implements SchemeFactory { - public abort_txns_resultTupleScheme getScheme() { - return new abort_txns_resultTupleScheme(); + private static class get_open_write_ids_resultTupleSchemeFactory implements SchemeFactory { + public get_open_write_ids_resultTupleScheme getScheme() { + return new get_open_write_ids_resultTupleScheme(); } } - private static class abort_txns_resultTupleScheme extends TupleScheme { + private static class get_open_write_ids_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.success = new GetOpenWriteIdsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new 
NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("allocate_table_write_id_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new commit_txn_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new commit_txn_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new allocate_table_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new allocate_table_write_id_argsTupleSchemeFactory()); } - private CommitTxnRequest rqst; // required + private AllocateTableWriteIdRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -177341,16 +179428,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AllocateTableWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(allocate_table_write_id_args.class, metaDataMap); } - public commit_txn_args() { + public allocate_table_write_id_args() { } - public commit_txn_args( - CommitTxnRequest rqst) + public allocate_table_write_id_args( + AllocateTableWriteIdRequest rqst) { this(); this.rqst = rqst; @@ -177359,14 +179446,14 @@ public commit_txn_args( /** * Performs a deep copy on other. 
*/ - public commit_txn_args(commit_txn_args other) { + public allocate_table_write_id_args(allocate_table_write_id_args other) { if (other.isSetRqst()) { - this.rqst = new CommitTxnRequest(other.rqst); + this.rqst = new AllocateTableWriteIdRequest(other.rqst); } } - public commit_txn_args deepCopy() { - return new commit_txn_args(this); + public allocate_table_write_id_args deepCopy() { + return new allocate_table_write_id_args(this); } @Override @@ -177374,11 +179461,11 @@ public void clear() { this.rqst = null; } - public CommitTxnRequest getRqst() { + public AllocateTableWriteIdRequest getRqst() { return this.rqst; } - public void setRqst(CommitTxnRequest rqst) { + public void setRqst(AllocateTableWriteIdRequest rqst) { this.rqst = rqst; } @@ -177403,7 +179490,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((CommitTxnRequest)value); + setRqst((AllocateTableWriteIdRequest)value); } break; @@ -177436,12 +179523,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof commit_txn_args) - return this.equals((commit_txn_args)that); + if (that instanceof allocate_table_write_id_args) + return this.equals((allocate_table_write_id_args)that); return false; } - public boolean equals(commit_txn_args that) { + public boolean equals(allocate_table_write_id_args that) { if (that == null) return false; @@ -177470,7 +179557,7 @@ public int hashCode() { } @Override - public int compareTo(commit_txn_args other) { + public int compareTo(allocate_table_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -177504,7 +179591,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("commit_txn_args("); + StringBuilder sb = new StringBuilder("allocate_table_write_id_args("); boolean first = true; sb.append("rqst:"); @@ -177542,15 +179629,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class commit_txn_argsStandardSchemeFactory implements SchemeFactory { - public commit_txn_argsStandardScheme getScheme() { - return new commit_txn_argsStandardScheme(); + private static class allocate_table_write_id_argsStandardSchemeFactory implements SchemeFactory { + public allocate_table_write_id_argsStandardScheme getScheme() { + return new allocate_table_write_id_argsStandardScheme(); } } - private static class commit_txn_argsStandardScheme extends StandardScheme { + private static class allocate_table_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177562,7 +179649,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args str switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new CommitTxnRequest(); + struct.rqst = new AllocateTableWriteIdRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -177578,7 +179665,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, allocate_table_write_id_args struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177593,16 +179680,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args st } - private static class commit_txn_argsTupleSchemeFactory implements SchemeFactory { - public commit_txn_argsTupleScheme getScheme() { - return new commit_txn_argsTupleScheme(); + private static class allocate_table_write_id_argsTupleSchemeFactory implements SchemeFactory { + public allocate_table_write_id_argsTupleScheme getScheme() { + return new allocate_table_write_id_argsTupleScheme(); } } - private static class commit_txn_argsTupleScheme extends TupleScheme { + private static class allocate_table_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -177615,11 +179702,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args str } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new CommitTxnRequest(); + struct.rqst = new AllocateTableWriteIdRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -177628,25 +179715,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args stru } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable 
public static class commit_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("allocate_table_write_id_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new commit_txn_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new commit_txn_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new allocate_table_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new allocate_table_write_id_resultTupleSchemeFactory()); } + private AllocateTableWriteIdResponse success; // required private NoSuchTxnException o1; // required private TxnAbortedException o2; // required + private MetaException o3; // required /** The set of fields this struct 
contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -177661,10 +179754,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args stru */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // O1 return O1; case 2: // O2 return O2; + case 3: // O3 + return O3; default: return null; } @@ -177708,46 +179805,85 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AllocateTableWriteIdResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(allocate_table_write_id_result.class, metaDataMap); } - public commit_txn_result() { + public allocate_table_write_id_result() { } - public commit_txn_result( + public allocate_table_write_id_result( + AllocateTableWriteIdResponse success, NoSuchTxnException o1, - TxnAbortedException o2) + TxnAbortedException o2, + MetaException o3) { this(); + this.success = success; this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. */ - public commit_txn_result(commit_txn_result other) { + public allocate_table_write_id_result(allocate_table_write_id_result other) { + if (other.isSetSuccess()) { + this.success = new AllocateTableWriteIdResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchTxnException(other.o1); } if (other.isSetO2()) { this.o2 = new TxnAbortedException(other.o2); } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } } - public commit_txn_result deepCopy() { - return new commit_txn_result(this); + public allocate_table_write_id_result deepCopy() { + return new allocate_table_write_id_result(this); } @Override public void clear() { + this.success = null; this.o1 = null; this.o2 = null; + this.o3 = null; + } + + public AllocateTableWriteIdResponse getSuccess() { + return this.success; + } + + public void setSuccess(AllocateTableWriteIdResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public NoSuchTxnException getO1() { @@ -177796,8 +179932,39 @@ public void setO2IsSet(boolean value) { } } + public MetaException getO3() { 
+ return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((AllocateTableWriteIdResponse)value); + } + break; + case O1: if (value == null) { unsetO1(); @@ -177814,17 +179981,31 @@ public void setFieldValue(_Fields field, Object value) { } break; + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); case O2: return getO2(); + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -177836,10 +180017,14 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -177848,15 +180033,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof commit_txn_result) - return this.equals((commit_txn_result)that); + if (that instanceof allocate_table_write_id_result) + return this.equals((allocate_table_write_id_result)that); return false; } - public boolean equals(commit_txn_result that) { + public boolean equals(allocate_table_write_id_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if 
(!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -177875,6 +180069,15 @@ public boolean equals(commit_txn_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -177882,6 +180085,11 @@ public boolean equals(commit_txn_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -177892,17 +180100,32 @@ public int hashCode() { if (present_o2) list.add(o2); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(commit_txn_result other) { + public int compareTo(allocate_table_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -177923,6 +180146,16 @@ public int compareTo(commit_txn_result other) { return lastComparison; } } + 
lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -177940,9 +180173,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("commit_txn_result("); + StringBuilder sb = new StringBuilder("allocate_table_write_id_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -177958,6 +180199,14 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -177965,6 +180214,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -177983,15 +180235,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class commit_txn_resultStandardSchemeFactory implements SchemeFactory { - public commit_txn_resultStandardScheme getScheme() { - return new commit_txn_resultStandardScheme(); + private static class allocate_table_write_id_resultStandardSchemeFactory implements SchemeFactory { + public allocate_table_write_id_resultStandardScheme getScheme() { + return new allocate_table_write_id_resultStandardScheme(); } } - private static class 
commit_txn_resultStandardScheme extends StandardScheme { + private static class allocate_table_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -178001,6 +180253,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new AllocateTableWriteIdResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new NoSuchTxnException(); @@ -178019,6 +180280,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -178028,10 +180298,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, allocate_table_write_id_result struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -178042,53 +180317,80 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct.o2.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class commit_txn_resultTupleSchemeFactory implements SchemeFactory { - public commit_txn_resultTupleScheme getScheme() { - return new commit_txn_resultTupleScheme(); + private static class allocate_table_write_id_resultTupleSchemeFactory implements SchemeFactory { + public allocate_table_write_id_resultTupleScheme getScheme() { + return new allocate_table_write_id_resultTupleScheme(); } } - private static class commit_txn_resultTupleScheme extends TupleScheme { + private static class allocate_table_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { 
struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { + struct.success = new AllocateTableWriteIdResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new TxnAbortedException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java new file mode 100644 index 0000000..4d5f435 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java @@ -0,0 +1,482 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import 
org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TxnToWriteId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnToWriteId"); + + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TxnToWriteIdStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TxnToWriteIdTupleSchemeFactory()); + } + + private long txnId; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_ID((short)1, "txnId"), + WRITE_ID((short)2, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_ID + return TXN_ID; + case 2: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TxnToWriteId.class, metaDataMap); + } + + public TxnToWriteId() { + } + + public TxnToWriteId( + long txnId, + long writeId) + { + this(); + this.txnId = txnId; + setTxnIdIsSet(true); + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public TxnToWriteId(TxnToWriteId other) { + __isset_bitfield = other.__isset_bitfield; + this.txnId = other.txnId; + this.writeId = other.writeId; + } + + public TxnToWriteId deepCopy() { + return new TxnToWriteId(this); + } + + @Override + public void clear() { + setTxnIdIsSet(false); + this.txnId = 0; + setWriteIdIsSet(false); + this.writeId = 0; + } + + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set 
(has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXN_ID: + return getTxnId(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_ID: + return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TxnToWriteId) + return this.equals((TxnToWriteId)that); + return false; + } + + public boolean equals(TxnToWriteId that) { + if (that == null) + return false; + + boolean this_present_txnId = true; + boolean that_present_txnId = true; + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { 
+ List list = new ArrayList(); + + boolean present_txnId = true; + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(TxnToWriteId other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TxnToWriteId("); + boolean first = true; + + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if 
(!isSetTxnId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnId' is unset! Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TxnToWriteIdStandardSchemeFactory implements SchemeFactory { + public TxnToWriteIdStandardScheme getScheme() { + return new TxnToWriteIdStandardScheme(); + } + } + + private static class TxnToWriteIdStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TxnToWriteId struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // 
WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TxnToWriteId struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TxnToWriteIdTupleSchemeFactory implements SchemeFactory { + public TxnToWriteIdTupleScheme getScheme() { + return new TxnToWriteIdTupleScheme(); + } + } + + private static class TxnToWriteIdTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TxnToWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.txnId); + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TxnToWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 01654c7..9c05a18 100644 --- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list754 = iprot.readListBegin(); - struct.pools = new ArrayList(_list754.size); - WMPool _elem755; - for (int _i756 = 0; _i756 < _list754.size; ++_i756) + org.apache.thrift.protocol.TList _list794 = iprot.readListBegin(); + struct.pools = new ArrayList(_list794.size); + WMPool _elem795; + for (int _i796 = 0; _i796 < _list794.size; ++_i796) { - _elem755 = new WMPool(); - _elem755.read(iprot); - struct.pools.add(_elem755); + _elem795 = new WMPool(); + _elem795.read(iprot); + struct.pools.add(_elem795); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list757 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list757.size); - WMMapping _elem758; - for (int _i759 = 0; _i759 < _list757.size; ++_i759) + org.apache.thrift.protocol.TList _list797 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list797.size); + WMMapping _elem798; + for (int _i799 = 0; _i799 < _list797.size; ++_i799) { - _elem758 = new WMMapping(); - _elem758.read(iprot); - struct.mappings.add(_elem758); + _elem798 = new WMMapping(); + _elem798.read(iprot); + struct.mappings.add(_elem798); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list760 = 
iprot.readListBegin(); - struct.triggers = new ArrayList(_list760.size); - WMTrigger _elem761; - for (int _i762 = 0; _i762 < _list760.size; ++_i762) + org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list800.size); + WMTrigger _elem801; + for (int _i802 = 0; _i802 < _list800.size; ++_i802) { - _elem761 = new WMTrigger(); - _elem761.read(iprot); - struct.triggers.add(_elem761); + _elem801 = new WMTrigger(); + _elem801.read(iprot); + struct.triggers.add(_elem801); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list763 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list763.size); - WMPoolTrigger _elem764; - for (int _i765 = 0; _i765 < _list763.size; ++_i765) + org.apache.thrift.protocol.TList _list803 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list803.size); + WMPoolTrigger _elem804; + for (int _i805 = 0; _i805 < _list803.size; ++_i805) { - _elem764 = new WMPoolTrigger(); - _elem764.read(iprot); - struct.poolTriggers.add(_elem764); + _elem804 = new WMPoolTrigger(); + _elem804.read(iprot); + struct.poolTriggers.add(_elem804); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter766 : struct.pools) + for (WMPool _iter806 : struct.pools) { - _iter766.write(oprot); + _iter806.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter767 : struct.mappings) + for (WMMapping _iter807 : struct.mappings) { - _iter767.write(oprot); + _iter807.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter768 : struct.triggers) + for (WMTrigger _iter808 : struct.triggers) { - _iter768.write(oprot); + _iter808.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter769 : struct.poolTriggers) + for (WMPoolTrigger _iter809 : struct.poolTriggers) { - _iter769.write(oprot); + _iter809.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter770 : struct.pools) + for (WMPool _iter810 : struct.pools) { - _iter770.write(oprot); + _iter810.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter771 : struct.mappings) + for (WMMapping _iter811 : struct.mappings) { - _iter771.write(oprot); + _iter811.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter772 : struct.triggers) + for (WMTrigger _iter812 : struct.triggers) { - 
_iter772.write(oprot); + _iter812.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter773 : struct.poolTriggers) + for (WMPoolTrigger _iter813 : struct.poolTriggers) { - _iter773.write(oprot); + _iter813.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list774 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list774.size); - WMPool _elem775; - for (int _i776 = 0; _i776 < _list774.size; ++_i776) + org.apache.thrift.protocol.TList _list814 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list814.size); + WMPool _elem815; + for (int _i816 = 0; _i816 < _list814.size; ++_i816) { - _elem775 = new WMPool(); - _elem775.read(iprot); - struct.pools.add(_elem775); + _elem815 = new WMPool(); + _elem815.read(iprot); + struct.pools.add(_elem815); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list777.size); - WMMapping _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list817.size); + WMMapping _elem818; + for (int _i819 = 0; _i819 < _list817.size; ++_i819) { - _elem778 = new WMMapping(); - _elem778.read(iprot); - struct.mappings.add(_elem778); + _elem818 = new WMMapping(); + _elem818.read(iprot); + struct.mappings.add(_elem818); } } struct.setMappingsIsSet(true); } if 
(incoming.get(1)) { { - org.apache.thrift.protocol.TList _list780 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list780.size); - WMTrigger _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list820 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list820.size); + WMTrigger _elem821; + for (int _i822 = 0; _i822 < _list820.size; ++_i822) { - _elem781 = new WMTrigger(); - _elem781.read(iprot); - struct.triggers.add(_elem781); + _elem821 = new WMTrigger(); + _elem821.read(iprot); + struct.triggers.add(_elem821); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list783 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list783.size); - WMPoolTrigger _elem784; - for (int _i785 = 0; _i785 < _list783.size; ++_i785) + org.apache.thrift.protocol.TList _list823 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list823.size); + WMPoolTrigger _elem824; + for (int _i825 = 0; _i825 < _list823.size; ++_i825) { - _elem784 = new WMPoolTrigger(); - _elem784.read(iprot); - struct.poolTriggers.add(_elem784); + _elem824 = new WMPoolTrigger(); + _elem824.read(iprot); + struct.poolTriggers.add(_elem824); } } struct.setPoolTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index 69ccf53..ba44e3a 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list786 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list786.size); - WMResourcePlan _elem787; - for (int _i788 = 0; _i788 < _list786.size; ++_i788) + org.apache.thrift.protocol.TList _list826 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list826.size); + WMResourcePlan _elem827; + for (int _i828 = 0; _i828 < _list826.size; ++_i828) { - _elem787 = new WMResourcePlan(); - _elem787.read(iprot); - struct.resourcePlans.add(_elem787); + _elem827 = new WMResourcePlan(); + _elem827.read(iprot); + struct.resourcePlans.add(_elem827); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter789 : struct.resourcePlans) + for (WMResourcePlan _iter829 : struct.resourcePlans) { - _iter789.write(oprot); + _iter829.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter790 : struct.resourcePlans) + for (WMResourcePlan _iter830 : struct.resourcePlans) { - _iter790.write(oprot); + _iter830.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list791 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list791.size); - WMResourcePlan _elem792; - for (int _i793 = 0; _i793 < _list791.size; ++_i793) + org.apache.thrift.protocol.TList _list831 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list831.size); + WMResourcePlan _elem832; + for (int _i833 = 0; _i833 < _list831.size; ++_i833) { - _elem792 = new WMResourcePlan(); - _elem792.read(iprot); - struct.resourcePlans.add(_elem792); + _elem832 = new WMResourcePlan(); + _elem832.read(iprot); + struct.resourcePlans.add(_elem832); } } struct.setResourcePlansIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index ee30063..edec382 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list810 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list810.size); - WMTrigger _elem811; - for (int _i812 = 0; _i812 < _list810.size; ++_i812) + org.apache.thrift.protocol.TList _list850 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list850.size); + WMTrigger _elem851; + for (int _i852 = 0; _i852 < _list850.size; ++_i852) { - _elem811 = new WMTrigger(); - _elem811.read(iprot); - struct.triggers.add(_elem811); + _elem851 = new 
WMTrigger(); + _elem851.read(iprot); + struct.triggers.add(_elem851); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter813 : struct.triggers) + for (WMTrigger _iter853 : struct.triggers) { - _iter813.write(oprot); + _iter853.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter814 : struct.triggers) + for (WMTrigger _iter854 : struct.triggers) { - _iter814.write(oprot); + _iter854.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list815 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list815.size); - WMTrigger _elem816; - for (int _i817 = 0; _i817 < _list815.size; ++_i817) + org.apache.thrift.protocol.TList _list855 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list855.size); + WMTrigger _elem856; + for (int _i857 = 0; _i857 < _list855.size; ++_i857) { - _elem816 = new WMTrigger(); - _elem816.read(iprot); - struct.triggers.add(_elem816); + _elem856 = new WMTrigger(); + _elem856.read(iprot); + struct.triggers.add(_elem856); } } struct.setTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index 5caf586..228f37f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list794 = iprot.readListBegin(); - struct.errors = new ArrayList(_list794.size); - String _elem795; - for (int _i796 = 0; _i796 < _list794.size; ++_i796) + org.apache.thrift.protocol.TList _list834 = iprot.readListBegin(); + struct.errors = new ArrayList(_list834.size); + String _elem835; + for (int _i836 = 0; _i836 < _list834.size; ++_i836) { - _elem795 = iprot.readString(); - struct.errors.add(_elem795); + _elem835 = iprot.readString(); + struct.errors.add(_elem835); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list797 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list797.size); - String _elem798; - for (int _i799 = 0; _i799 < _list797.size; ++_i799) + org.apache.thrift.protocol.TList _list837 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list837.size); + String _elem838; + for (int _i839 = 0; _i839 < _list837.size; ++_i839) { - _elem798 = iprot.readString(); - struct.warnings.add(_elem798); + _elem838 = iprot.readString(); + struct.warnings.add(_elem838); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource 
oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter800 : struct.errors) + for (String _iter840 : struct.errors) { - oprot.writeString(_iter800); + oprot.writeString(_iter840); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter801 : struct.warnings) + for (String _iter841 : struct.warnings) { - oprot.writeString(_iter801); + oprot.writeString(_iter841); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter802 : struct.errors) + for (String _iter842 : struct.errors) { - oprot.writeString(_iter802); + oprot.writeString(_iter842); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter803 : struct.warnings) + for (String _iter843 : struct.warnings) { - oprot.writeString(_iter803); + oprot.writeString(_iter843); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list804 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list804.size); - String _elem805; - for (int _i806 = 0; _i806 < _list804.size; ++_i806) + org.apache.thrift.protocol.TList _list844 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list844.size); + String _elem845; + for (int _i846 = 0; _i846 < 
_list844.size; ++_i846) { - _elem805 = iprot.readString(); - struct.errors.add(_elem805); + _elem845 = iprot.readString(); + struct.errors.add(_elem845); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list807 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list807.size); - String _elem808; - for (int _i809 = 0; _i809 < _list807.size; ++_i809) + org.apache.thrift.protocol.TList _list847 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list847.size); + String _elem848; + for (int _i849 = 0; _i849 < _list847.size; ++_i849) { - _elem808 = iprot.readString(); - struct.warnings.add(_elem808); + _elem848 = iprot.readString(); + struct.warnings.add(_elem848); } } struct.setWarningsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 9382c60..7617521 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1147,6 +1147,21 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function commit_txn(\metastore\CommitTxnRequest $rqst); /** + * @param \metastore\GetOpenWriteIdsRequest $rqst + * @return \metastore\GetOpenWriteIdsResponse + * @throws \metastore\NoSuchTxnException + * @throws \metastore\MetaException + */ + public function get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst); + /** + * @param \metastore\AllocateTableWriteIdRequest $rqst + * @return \metastore\AllocateTableWriteIdResponse + * @throws \metastore\NoSuchTxnException + * @throws \metastore\TxnAbortedException + * @throws \metastore\MetaException + */ + public function 
allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst); + /** * @param \metastore\LockRequest $rqst * @return \metastore\LockResponse * @throws \metastore\NoSuchTxnException @@ -9553,6 +9568,123 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst) + { + $this->send_get_open_write_ids($rqst); + return $this->recv_get_open_write_ids(); + } + + public function send_get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_get_open_write_ids_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_open_write_ids', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_open_write_ids', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_open_write_ids() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_open_write_ids_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_open_write_ids_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + 
if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_open_write_ids failed: unknown result"); + } + + public function allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst) + { + $this->send_allocate_table_write_id($rqst); + return $this->recv_allocate_table_write_id(); + } + + public function send_allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_allocate_table_write_id_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'allocate_table_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('allocate_table_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_allocate_table_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_allocate_table_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_allocate_table_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if 
($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + throw new \Exception("allocate_table_write_id failed: unknown result"); + } + public function lock(\metastore\LockRequest $rqst) { $this->send_lock($rqst); @@ -12864,14 +12996,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size724 = 0; - $_etype727 = 0; - $xfer += $input->readListBegin($_etype727, $_size724); - for ($_i728 = 0; $_i728 < $_size724; ++$_i728) + $_size759 = 0; + $_etype762 = 0; + $xfer += $input->readListBegin($_etype762, $_size759); + for ($_i763 = 0; $_i763 < $_size759; ++$_i763) { - $elem729 = null; - $xfer += $input->readString($elem729); - $this->success []= $elem729; + $elem764 = null; + $xfer += $input->readString($elem764); + $this->success []= $elem764; } $xfer += $input->readListEnd(); } else { @@ -12907,9 +13039,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter730) + foreach ($this->success as $iter765) { - $xfer += $output->writeString($iter730); + $xfer += $output->writeString($iter765); } } $output->writeListEnd(); @@ -13040,14 +13172,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size731 = 0; - $_etype734 = 0; - $xfer += $input->readListBegin($_etype734, $_size731); - for ($_i735 = 0; $_i735 < $_size731; ++$_i735) + $_size766 = 0; + $_etype769 = 0; + $xfer += $input->readListBegin($_etype769, $_size766); + for ($_i770 = 0; $_i770 < $_size766; ++$_i770) { - $elem736 = null; - $xfer += $input->readString($elem736); - $this->success []= $elem736; + $elem771 = null; + $xfer += $input->readString($elem771); + $this->success []= $elem771; } $xfer += $input->readListEnd(); } else { @@ -13083,9 +13215,9 @@ class ThriftHiveMetastore_get_all_databases_result { { 
$output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter737) + foreach ($this->success as $iter772) { - $xfer += $output->writeString($iter737); + $xfer += $output->writeString($iter772); } } $output->writeListEnd(); @@ -14086,18 +14218,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size738 = 0; - $_ktype739 = 0; - $_vtype740 = 0; - $xfer += $input->readMapBegin($_ktype739, $_vtype740, $_size738); - for ($_i742 = 0; $_i742 < $_size738; ++$_i742) + $_size773 = 0; + $_ktype774 = 0; + $_vtype775 = 0; + $xfer += $input->readMapBegin($_ktype774, $_vtype775, $_size773); + for ($_i777 = 0; $_i777 < $_size773; ++$_i777) { - $key743 = ''; - $val744 = new \metastore\Type(); - $xfer += $input->readString($key743); - $val744 = new \metastore\Type(); - $xfer += $val744->read($input); - $this->success[$key743] = $val744; + $key778 = ''; + $val779 = new \metastore\Type(); + $xfer += $input->readString($key778); + $val779 = new \metastore\Type(); + $xfer += $val779->read($input); + $this->success[$key778] = $val779; } $xfer += $input->readMapEnd(); } else { @@ -14133,10 +14265,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter745 => $viter746) + foreach ($this->success as $kiter780 => $viter781) { - $xfer += $output->writeString($kiter745); - $xfer += $viter746->write($output); + $xfer += $output->writeString($kiter780); + $xfer += $viter781->write($output); } } $output->writeMapEnd(); @@ -14340,15 +14472,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size747 = 0; - $_etype750 = 0; - $xfer += $input->readListBegin($_etype750, $_size747); - for ($_i751 = 0; $_i751 < $_size747; ++$_i751) + $_size782 = 0; + $_etype785 = 0; + $xfer += $input->readListBegin($_etype785, $_size782); + 
for ($_i786 = 0; $_i786 < $_size782; ++$_i786) { - $elem752 = null; - $elem752 = new \metastore\FieldSchema(); - $xfer += $elem752->read($input); - $this->success []= $elem752; + $elem787 = null; + $elem787 = new \metastore\FieldSchema(); + $xfer += $elem787->read($input); + $this->success []= $elem787; } $xfer += $input->readListEnd(); } else { @@ -14400,9 +14532,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter753) + foreach ($this->success as $iter788) { - $xfer += $iter753->write($output); + $xfer += $iter788->write($output); } } $output->writeListEnd(); @@ -14644,15 +14776,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size754 = 0; - $_etype757 = 0; - $xfer += $input->readListBegin($_etype757, $_size754); - for ($_i758 = 0; $_i758 < $_size754; ++$_i758) + $_size789 = 0; + $_etype792 = 0; + $xfer += $input->readListBegin($_etype792, $_size789); + for ($_i793 = 0; $_i793 < $_size789; ++$_i793) { - $elem759 = null; - $elem759 = new \metastore\FieldSchema(); - $xfer += $elem759->read($input); - $this->success []= $elem759; + $elem794 = null; + $elem794 = new \metastore\FieldSchema(); + $xfer += $elem794->read($input); + $this->success []= $elem794; } $xfer += $input->readListEnd(); } else { @@ -14704,9 +14836,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter760) + foreach ($this->success as $iter795) { - $xfer += $iter760->write($output); + $xfer += $iter795->write($output); } } $output->writeListEnd(); @@ -14920,15 +15052,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size761 = 0; - $_etype764 = 0; - $xfer += $input->readListBegin($_etype764, $_size761); - for ($_i765 = 0; 
$_i765 < $_size761; ++$_i765) + $_size796 = 0; + $_etype799 = 0; + $xfer += $input->readListBegin($_etype799, $_size796); + for ($_i800 = 0; $_i800 < $_size796; ++$_i800) { - $elem766 = null; - $elem766 = new \metastore\FieldSchema(); - $xfer += $elem766->read($input); - $this->success []= $elem766; + $elem801 = null; + $elem801 = new \metastore\FieldSchema(); + $xfer += $elem801->read($input); + $this->success []= $elem801; } $xfer += $input->readListEnd(); } else { @@ -14980,9 +15112,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter767) + foreach ($this->success as $iter802) { - $xfer += $iter767->write($output); + $xfer += $iter802->write($output); } } $output->writeListEnd(); @@ -15224,15 +15356,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size768 = 0; - $_etype771 = 0; - $xfer += $input->readListBegin($_etype771, $_size768); - for ($_i772 = 0; $_i772 < $_size768; ++$_i772) + $_size803 = 0; + $_etype806 = 0; + $xfer += $input->readListBegin($_etype806, $_size803); + for ($_i807 = 0; $_i807 < $_size803; ++$_i807) { - $elem773 = null; - $elem773 = new \metastore\FieldSchema(); - $xfer += $elem773->read($input); - $this->success []= $elem773; + $elem808 = null; + $elem808 = new \metastore\FieldSchema(); + $xfer += $elem808->read($input); + $this->success []= $elem808; } $xfer += $input->readListEnd(); } else { @@ -15284,9 +15416,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter774) + foreach ($this->success as $iter809) { - $xfer += $iter774->write($output); + $xfer += $iter809->write($output); } } $output->writeListEnd(); @@ -15926,15 +16058,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == 
TType::LST) { $this->primaryKeys = array(); - $_size775 = 0; - $_etype778 = 0; - $xfer += $input->readListBegin($_etype778, $_size775); - for ($_i779 = 0; $_i779 < $_size775; ++$_i779) + $_size810 = 0; + $_etype813 = 0; + $xfer += $input->readListBegin($_etype813, $_size810); + for ($_i814 = 0; $_i814 < $_size810; ++$_i814) { - $elem780 = null; - $elem780 = new \metastore\SQLPrimaryKey(); - $xfer += $elem780->read($input); - $this->primaryKeys []= $elem780; + $elem815 = null; + $elem815 = new \metastore\SQLPrimaryKey(); + $xfer += $elem815->read($input); + $this->primaryKeys []= $elem815; } $xfer += $input->readListEnd(); } else { @@ -15944,15 +16076,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size781 = 0; - $_etype784 = 0; - $xfer += $input->readListBegin($_etype784, $_size781); - for ($_i785 = 0; $_i785 < $_size781; ++$_i785) + $_size816 = 0; + $_etype819 = 0; + $xfer += $input->readListBegin($_etype819, $_size816); + for ($_i820 = 0; $_i820 < $_size816; ++$_i820) { - $elem786 = null; - $elem786 = new \metastore\SQLForeignKey(); - $xfer += $elem786->read($input); - $this->foreignKeys []= $elem786; + $elem821 = null; + $elem821 = new \metastore\SQLForeignKey(); + $xfer += $elem821->read($input); + $this->foreignKeys []= $elem821; } $xfer += $input->readListEnd(); } else { @@ -15962,15 +16094,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size787 = 0; - $_etype790 = 0; - $xfer += $input->readListBegin($_etype790, $_size787); - for ($_i791 = 0; $_i791 < $_size787; ++$_i791) + $_size822 = 0; + $_etype825 = 0; + $xfer += $input->readListBegin($_etype825, $_size822); + for ($_i826 = 0; $_i826 < $_size822; ++$_i826) { - $elem792 = null; - $elem792 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem792->read($input); - $this->uniqueConstraints []= $elem792; + $elem827 = null; 
+ $elem827 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem827->read($input); + $this->uniqueConstraints []= $elem827; } $xfer += $input->readListEnd(); } else { @@ -15980,15 +16112,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size793 = 0; - $_etype796 = 0; - $xfer += $input->readListBegin($_etype796, $_size793); - for ($_i797 = 0; $_i797 < $_size793; ++$_i797) + $_size828 = 0; + $_etype831 = 0; + $xfer += $input->readListBegin($_etype831, $_size828); + for ($_i832 = 0; $_i832 < $_size828; ++$_i832) { - $elem798 = null; - $elem798 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem798->read($input); - $this->notNullConstraints []= $elem798; + $elem833 = null; + $elem833 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem833->read($input); + $this->notNullConstraints []= $elem833; } $xfer += $input->readListEnd(); } else { @@ -16024,9 +16156,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter799) + foreach ($this->primaryKeys as $iter834) { - $xfer += $iter799->write($output); + $xfer += $iter834->write($output); } } $output->writeListEnd(); @@ -16041,9 +16173,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter800) + foreach ($this->foreignKeys as $iter835) { - $xfer += $iter800->write($output); + $xfer += $iter835->write($output); } } $output->writeListEnd(); @@ -16058,9 +16190,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter801) + foreach ($this->uniqueConstraints as $iter836) { - $xfer += $iter801->write($output); + $xfer += $iter836->write($output); } } 
$output->writeListEnd(); @@ -16075,9 +16207,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter802) + foreach ($this->notNullConstraints as $iter837) { - $xfer += $iter802->write($output); + $xfer += $iter837->write($output); } } $output->writeListEnd(); @@ -17713,14 +17845,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size803 = 0; - $_etype806 = 0; - $xfer += $input->readListBegin($_etype806, $_size803); - for ($_i807 = 0; $_i807 < $_size803; ++$_i807) + $_size838 = 0; + $_etype841 = 0; + $xfer += $input->readListBegin($_etype841, $_size838); + for ($_i842 = 0; $_i842 < $_size838; ++$_i842) { - $elem808 = null; - $xfer += $input->readString($elem808); - $this->partNames []= $elem808; + $elem843 = null; + $xfer += $input->readString($elem843); + $this->partNames []= $elem843; } $xfer += $input->readListEnd(); } else { @@ -17758,9 +17890,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter809) + foreach ($this->partNames as $iter844) { - $xfer += $output->writeString($iter809); + $xfer += $output->writeString($iter844); } } $output->writeListEnd(); @@ -18011,14 +18143,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size810 = 0; - $_etype813 = 0; - $xfer += $input->readListBegin($_etype813, $_size810); - for ($_i814 = 0; $_i814 < $_size810; ++$_i814) + $_size845 = 0; + $_etype848 = 0; + $xfer += $input->readListBegin($_etype848, $_size845); + for ($_i849 = 0; $_i849 < $_size845; ++$_i849) { - $elem815 = null; - $xfer += $input->readString($elem815); - $this->success []= $elem815; + $elem850 = null; + $xfer += $input->readString($elem850); + $this->success []= $elem850; } $xfer += 
$input->readListEnd(); } else { @@ -18054,9 +18186,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter816) + foreach ($this->success as $iter851) { - $xfer += $output->writeString($iter816); + $xfer += $output->writeString($iter851); } } $output->writeListEnd(); @@ -18258,14 +18390,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size817 = 0; - $_etype820 = 0; - $xfer += $input->readListBegin($_etype820, $_size817); - for ($_i821 = 0; $_i821 < $_size817; ++$_i821) + $_size852 = 0; + $_etype855 = 0; + $xfer += $input->readListBegin($_etype855, $_size852); + for ($_i856 = 0; $_i856 < $_size852; ++$_i856) { - $elem822 = null; - $xfer += $input->readString($elem822); - $this->success []= $elem822; + $elem857 = null; + $xfer += $input->readString($elem857); + $this->success []= $elem857; } $xfer += $input->readListEnd(); } else { @@ -18301,9 +18433,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter823) + foreach ($this->success as $iter858) { - $xfer += $output->writeString($iter823); + $xfer += $output->writeString($iter858); } } $output->writeListEnd(); @@ -18459,14 +18591,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size824 = 0; - $_etype827 = 0; - $xfer += $input->readListBegin($_etype827, $_size824); - for ($_i828 = 0; $_i828 < $_size824; ++$_i828) + $_size859 = 0; + $_etype862 = 0; + $xfer += $input->readListBegin($_etype862, $_size859); + for ($_i863 = 0; $_i863 < $_size859; ++$_i863) { - $elem829 = null; - $xfer += $input->readString($elem829); - $this->success []= $elem829; + $elem864 = null; + $xfer += $input->readString($elem864); + $this->success []= $elem864; } $xfer += 
$input->readListEnd(); } else { @@ -18502,9 +18634,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter830) + foreach ($this->success as $iter865) { - $xfer += $output->writeString($iter830); + $xfer += $output->writeString($iter865); } } $output->writeListEnd(); @@ -18609,14 +18741,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size831 = 0; - $_etype834 = 0; - $xfer += $input->readListBegin($_etype834, $_size831); - for ($_i835 = 0; $_i835 < $_size831; ++$_i835) + $_size866 = 0; + $_etype869 = 0; + $xfer += $input->readListBegin($_etype869, $_size866); + for ($_i870 = 0; $_i870 < $_size866; ++$_i870) { - $elem836 = null; - $xfer += $input->readString($elem836); - $this->tbl_types []= $elem836; + $elem871 = null; + $xfer += $input->readString($elem871); + $this->tbl_types []= $elem871; } $xfer += $input->readListEnd(); } else { @@ -18654,9 +18786,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter837) + foreach ($this->tbl_types as $iter872) { - $xfer += $output->writeString($iter837); + $xfer += $output->writeString($iter872); } } $output->writeListEnd(); @@ -18733,15 +18865,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size838 = 0; - $_etype841 = 0; - $xfer += $input->readListBegin($_etype841, $_size838); - for ($_i842 = 0; $_i842 < $_size838; ++$_i842) + $_size873 = 0; + $_etype876 = 0; + $xfer += $input->readListBegin($_etype876, $_size873); + for ($_i877 = 0; $_i877 < $_size873; ++$_i877) { - $elem843 = null; - $elem843 = new \metastore\TableMeta(); - $xfer += $elem843->read($input); - $this->success []= $elem843; + $elem878 = null; + $elem878 = new \metastore\TableMeta(); + $xfer += 
$elem878->read($input); + $this->success []= $elem878; } $xfer += $input->readListEnd(); } else { @@ -18777,9 +18909,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter844) + foreach ($this->success as $iter879) { - $xfer += $iter844->write($output); + $xfer += $iter879->write($output); } } $output->writeListEnd(); @@ -18935,14 +19067,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size845 = 0; - $_etype848 = 0; - $xfer += $input->readListBegin($_etype848, $_size845); - for ($_i849 = 0; $_i849 < $_size845; ++$_i849) + $_size880 = 0; + $_etype883 = 0; + $xfer += $input->readListBegin($_etype883, $_size880); + for ($_i884 = 0; $_i884 < $_size880; ++$_i884) { - $elem850 = null; - $xfer += $input->readString($elem850); - $this->success []= $elem850; + $elem885 = null; + $xfer += $input->readString($elem885); + $this->success []= $elem885; } $xfer += $input->readListEnd(); } else { @@ -18978,9 +19110,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter851) + foreach ($this->success as $iter886) { - $xfer += $output->writeString($iter851); + $xfer += $output->writeString($iter886); } } $output->writeListEnd(); @@ -19295,14 +19427,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size852 = 0; - $_etype855 = 0; - $xfer += $input->readListBegin($_etype855, $_size852); - for ($_i856 = 0; $_i856 < $_size852; ++$_i856) + $_size887 = 0; + $_etype890 = 0; + $xfer += $input->readListBegin($_etype890, $_size887); + for ($_i891 = 0; $_i891 < $_size887; ++$_i891) { - $elem857 = null; - $xfer += $input->readString($elem857); - $this->tbl_names []= $elem857; + $elem892 = null; + $xfer += $input->readString($elem892); + 
$this->tbl_names []= $elem892; } $xfer += $input->readListEnd(); } else { @@ -19335,9 +19467,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter858) + foreach ($this->tbl_names as $iter893) { - $xfer += $output->writeString($iter858); + $xfer += $output->writeString($iter893); } } $output->writeListEnd(); @@ -19402,15 +19534,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size859 = 0; - $_etype862 = 0; - $xfer += $input->readListBegin($_etype862, $_size859); - for ($_i863 = 0; $_i863 < $_size859; ++$_i863) + $_size894 = 0; + $_etype897 = 0; + $xfer += $input->readListBegin($_etype897, $_size894); + for ($_i898 = 0; $_i898 < $_size894; ++$_i898) { - $elem864 = null; - $elem864 = new \metastore\Table(); - $xfer += $elem864->read($input); - $this->success []= $elem864; + $elem899 = null; + $elem899 = new \metastore\Table(); + $xfer += $elem899->read($input); + $this->success []= $elem899; } $xfer += $input->readListEnd(); } else { @@ -19438,9 +19570,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter865) + foreach ($this->success as $iter900) { - $xfer += $iter865->write($output); + $xfer += $iter900->write($output); } } $output->writeListEnd(); @@ -19967,14 +20099,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size866 = 0; - $_etype869 = 0; - $xfer += $input->readListBegin($_etype869, $_size866); - for ($_i870 = 0; $_i870 < $_size866; ++$_i870) + $_size901 = 0; + $_etype904 = 0; + $xfer += $input->readListBegin($_etype904, $_size901); + for ($_i905 = 0; $_i905 < $_size901; ++$_i905) { - $elem871 = null; - $xfer += $input->readString($elem871); - 
$this->tbl_names []= $elem871; + $elem906 = null; + $xfer += $input->readString($elem906); + $this->tbl_names []= $elem906; } $xfer += $input->readListEnd(); } else { @@ -20007,9 +20139,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter872) + foreach ($this->tbl_names as $iter907) { - $xfer += $output->writeString($iter872); + $xfer += $output->writeString($iter907); } } $output->writeListEnd(); @@ -20114,18 +20246,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size873 = 0; - $_ktype874 = 0; - $_vtype875 = 0; - $xfer += $input->readMapBegin($_ktype874, $_vtype875, $_size873); - for ($_i877 = 0; $_i877 < $_size873; ++$_i877) + $_size908 = 0; + $_ktype909 = 0; + $_vtype910 = 0; + $xfer += $input->readMapBegin($_ktype909, $_vtype910, $_size908); + for ($_i912 = 0; $_i912 < $_size908; ++$_i912) { - $key878 = ''; - $val879 = new \metastore\Materialization(); - $xfer += $input->readString($key878); - $val879 = new \metastore\Materialization(); - $xfer += $val879->read($input); - $this->success[$key878] = $val879; + $key913 = ''; + $val914 = new \metastore\Materialization(); + $xfer += $input->readString($key913); + $val914 = new \metastore\Materialization(); + $xfer += $val914->read($input); + $this->success[$key913] = $val914; } $xfer += $input->readMapEnd(); } else { @@ -20177,10 +20309,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter880 => $viter881) + foreach ($this->success as $kiter915 => $viter916) { - $xfer += $output->writeString($kiter880); - $xfer += $viter881->write($output); + $xfer += $output->writeString($kiter915); + $xfer += $viter916->write($output); } } $output->writeMapEnd(); @@ 
-20416,14 +20548,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size882 = 0; - $_etype885 = 0; - $xfer += $input->readListBegin($_etype885, $_size882); - for ($_i886 = 0; $_i886 < $_size882; ++$_i886) + $_size917 = 0; + $_etype920 = 0; + $xfer += $input->readListBegin($_etype920, $_size917); + for ($_i921 = 0; $_i921 < $_size917; ++$_i921) { - $elem887 = null; - $xfer += $input->readString($elem887); - $this->success []= $elem887; + $elem922 = null; + $xfer += $input->readString($elem922); + $this->success []= $elem922; } $xfer += $input->readListEnd(); } else { @@ -20475,9 +20607,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter888) + foreach ($this->success as $iter923) { - $xfer += $output->writeString($iter888); + $xfer += $output->writeString($iter923); } } $output->writeListEnd(); @@ -21790,15 +21922,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size889 = 0; - $_etype892 = 0; - $xfer += $input->readListBegin($_etype892, $_size889); - for ($_i893 = 0; $_i893 < $_size889; ++$_i893) + $_size924 = 0; + $_etype927 = 0; + $xfer += $input->readListBegin($_etype927, $_size924); + for ($_i928 = 0; $_i928 < $_size924; ++$_i928) { - $elem894 = null; - $elem894 = new \metastore\Partition(); - $xfer += $elem894->read($input); - $this->new_parts []= $elem894; + $elem929 = null; + $elem929 = new \metastore\Partition(); + $xfer += $elem929->read($input); + $this->new_parts []= $elem929; } $xfer += $input->readListEnd(); } else { @@ -21826,9 +21958,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter895) + foreach ($this->new_parts as $iter930) { - $xfer += $iter895->write($output); + $xfer += 
$iter930->write($output); } } $output->writeListEnd(); @@ -22043,15 +22175,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size896 = 0; - $_etype899 = 0; - $xfer += $input->readListBegin($_etype899, $_size896); - for ($_i900 = 0; $_i900 < $_size896; ++$_i900) + $_size931 = 0; + $_etype934 = 0; + $xfer += $input->readListBegin($_etype934, $_size931); + for ($_i935 = 0; $_i935 < $_size931; ++$_i935) { - $elem901 = null; - $elem901 = new \metastore\PartitionSpec(); - $xfer += $elem901->read($input); - $this->new_parts []= $elem901; + $elem936 = null; + $elem936 = new \metastore\PartitionSpec(); + $xfer += $elem936->read($input); + $this->new_parts []= $elem936; } $xfer += $input->readListEnd(); } else { @@ -22079,9 +22211,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter902) + foreach ($this->new_parts as $iter937) { - $xfer += $iter902->write($output); + $xfer += $iter937->write($output); } } $output->writeListEnd(); @@ -22331,14 +22463,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size903 = 0; - $_etype906 = 0; - $xfer += $input->readListBegin($_etype906, $_size903); - for ($_i907 = 0; $_i907 < $_size903; ++$_i907) + $_size938 = 0; + $_etype941 = 0; + $xfer += $input->readListBegin($_etype941, $_size938); + for ($_i942 = 0; $_i942 < $_size938; ++$_i942) { - $elem908 = null; - $xfer += $input->readString($elem908); - $this->part_vals []= $elem908; + $elem943 = null; + $xfer += $input->readString($elem943); + $this->part_vals []= $elem943; } $xfer += $input->readListEnd(); } else { @@ -22376,9 +22508,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter909) + foreach ($this->part_vals as 
$iter944) { - $xfer += $output->writeString($iter909); + $xfer += $output->writeString($iter944); } } $output->writeListEnd(); @@ -22880,14 +23012,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size910 = 0; - $_etype913 = 0; - $xfer += $input->readListBegin($_etype913, $_size910); - for ($_i914 = 0; $_i914 < $_size910; ++$_i914) + $_size945 = 0; + $_etype948 = 0; + $xfer += $input->readListBegin($_etype948, $_size945); + for ($_i949 = 0; $_i949 < $_size945; ++$_i949) { - $elem915 = null; - $xfer += $input->readString($elem915); - $this->part_vals []= $elem915; + $elem950 = null; + $xfer += $input->readString($elem950); + $this->part_vals []= $elem950; } $xfer += $input->readListEnd(); } else { @@ -22933,9 +23065,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter916) + foreach ($this->part_vals as $iter951) { - $xfer += $output->writeString($iter916); + $xfer += $output->writeString($iter951); } } $output->writeListEnd(); @@ -23789,14 +23921,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size917 = 0; - $_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); - for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size952 = 0; + $_etype955 = 0; + $xfer += $input->readListBegin($_etype955, $_size952); + for ($_i956 = 0; $_i956 < $_size952; ++$_i956) { - $elem922 = null; - $xfer += $input->readString($elem922); - $this->part_vals []= $elem922; + $elem957 = null; + $xfer += $input->readString($elem957); + $this->part_vals []= $elem957; } $xfer += $input->readListEnd(); } else { @@ -23841,9 +23973,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as 
$iter923) + foreach ($this->part_vals as $iter958) { - $xfer += $output->writeString($iter923); + $xfer += $output->writeString($iter958); } } $output->writeListEnd(); @@ -24096,14 +24228,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size924 = 0; - $_etype927 = 0; - $xfer += $input->readListBegin($_etype927, $_size924); - for ($_i928 = 0; $_i928 < $_size924; ++$_i928) + $_size959 = 0; + $_etype962 = 0; + $xfer += $input->readListBegin($_etype962, $_size959); + for ($_i963 = 0; $_i963 < $_size959; ++$_i963) { - $elem929 = null; - $xfer += $input->readString($elem929); - $this->part_vals []= $elem929; + $elem964 = null; + $xfer += $input->readString($elem964); + $this->part_vals []= $elem964; } $xfer += $input->readListEnd(); } else { @@ -24156,9 +24288,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter930) + foreach ($this->part_vals as $iter965) { - $xfer += $output->writeString($iter930); + $xfer += $output->writeString($iter965); } } $output->writeListEnd(); @@ -25172,14 +25304,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size931 = 0; - $_etype934 = 0; - $xfer += $input->readListBegin($_etype934, $_size931); - for ($_i935 = 0; $_i935 < $_size931; ++$_i935) + $_size966 = 0; + $_etype969 = 0; + $xfer += $input->readListBegin($_etype969, $_size966); + for ($_i970 = 0; $_i970 < $_size966; ++$_i970) { - $elem936 = null; - $xfer += $input->readString($elem936); - $this->part_vals []= $elem936; + $elem971 = null; + $xfer += $input->readString($elem971); + $this->part_vals []= $elem971; } $xfer += $input->readListEnd(); } else { @@ -25217,9 +25349,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { 
- foreach ($this->part_vals as $iter937) + foreach ($this->part_vals as $iter972) { - $xfer += $output->writeString($iter937); + $xfer += $output->writeString($iter972); } } $output->writeListEnd(); @@ -25461,17 +25593,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size938 = 0; - $_ktype939 = 0; - $_vtype940 = 0; - $xfer += $input->readMapBegin($_ktype939, $_vtype940, $_size938); - for ($_i942 = 0; $_i942 < $_size938; ++$_i942) + $_size973 = 0; + $_ktype974 = 0; + $_vtype975 = 0; + $xfer += $input->readMapBegin($_ktype974, $_vtype975, $_size973); + for ($_i977 = 0; $_i977 < $_size973; ++$_i977) { - $key943 = ''; - $val944 = ''; - $xfer += $input->readString($key943); - $xfer += $input->readString($val944); - $this->partitionSpecs[$key943] = $val944; + $key978 = ''; + $val979 = ''; + $xfer += $input->readString($key978); + $xfer += $input->readString($val979); + $this->partitionSpecs[$key978] = $val979; } $xfer += $input->readMapEnd(); } else { @@ -25527,10 +25659,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter945 => $viter946) + foreach ($this->partitionSpecs as $kiter980 => $viter981) { - $xfer += $output->writeString($kiter945); - $xfer += $output->writeString($viter946); + $xfer += $output->writeString($kiter980); + $xfer += $output->writeString($viter981); } } $output->writeMapEnd(); @@ -25842,17 +25974,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size947 = 0; - $_ktype948 = 0; - $_vtype949 = 0; - $xfer += $input->readMapBegin($_ktype948, $_vtype949, $_size947); - for ($_i951 = 0; $_i951 < $_size947; ++$_i951) + $_size982 = 0; + $_ktype983 = 0; + $_vtype984 = 0; + $xfer += $input->readMapBegin($_ktype983, $_vtype984, $_size982); + for ($_i986 = 0; $_i986 
< $_size982; ++$_i986) { - $key952 = ''; - $val953 = ''; - $xfer += $input->readString($key952); - $xfer += $input->readString($val953); - $this->partitionSpecs[$key952] = $val953; + $key987 = ''; + $val988 = ''; + $xfer += $input->readString($key987); + $xfer += $input->readString($val988); + $this->partitionSpecs[$key987] = $val988; } $xfer += $input->readMapEnd(); } else { @@ -25908,10 +26040,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter954 => $viter955) + foreach ($this->partitionSpecs as $kiter989 => $viter990) { - $xfer += $output->writeString($kiter954); - $xfer += $output->writeString($viter955); + $xfer += $output->writeString($kiter989); + $xfer += $output->writeString($viter990); } } $output->writeMapEnd(); @@ -26044,15 +26176,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size956 = 0; - $_etype959 = 0; - $xfer += $input->readListBegin($_etype959, $_size956); - for ($_i960 = 0; $_i960 < $_size956; ++$_i960) + $_size991 = 0; + $_etype994 = 0; + $xfer += $input->readListBegin($_etype994, $_size991); + for ($_i995 = 0; $_i995 < $_size991; ++$_i995) { - $elem961 = null; - $elem961 = new \metastore\Partition(); - $xfer += $elem961->read($input); - $this->success []= $elem961; + $elem996 = null; + $elem996 = new \metastore\Partition(); + $xfer += $elem996->read($input); + $this->success []= $elem996; } $xfer += $input->readListEnd(); } else { @@ -26112,9 +26244,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter962) + foreach ($this->success as $iter997) { - $xfer += $iter962->write($output); + $xfer += $iter997->write($output); } } $output->writeListEnd(); @@ -26260,14 +26392,14 @@ class 
ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size963 = 0; - $_etype966 = 0; - $xfer += $input->readListBegin($_etype966, $_size963); - for ($_i967 = 0; $_i967 < $_size963; ++$_i967) + $_size998 = 0; + $_etype1001 = 0; + $xfer += $input->readListBegin($_etype1001, $_size998); + for ($_i1002 = 0; $_i1002 < $_size998; ++$_i1002) { - $elem968 = null; - $xfer += $input->readString($elem968); - $this->part_vals []= $elem968; + $elem1003 = null; + $xfer += $input->readString($elem1003); + $this->part_vals []= $elem1003; } $xfer += $input->readListEnd(); } else { @@ -26284,14 +26416,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size969 = 0; - $_etype972 = 0; - $xfer += $input->readListBegin($_etype972, $_size969); - for ($_i973 = 0; $_i973 < $_size969; ++$_i973) + $_size1004 = 0; + $_etype1007 = 0; + $xfer += $input->readListBegin($_etype1007, $_size1004); + for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008) { - $elem974 = null; - $xfer += $input->readString($elem974); - $this->group_names []= $elem974; + $elem1009 = null; + $xfer += $input->readString($elem1009); + $this->group_names []= $elem1009; } $xfer += $input->readListEnd(); } else { @@ -26329,9 +26461,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter975) + foreach ($this->part_vals as $iter1010) { - $xfer += $output->writeString($iter975); + $xfer += $output->writeString($iter1010); } } $output->writeListEnd(); @@ -26351,9 +26483,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter976) + foreach ($this->group_names as $iter1011) { - $xfer += $output->writeString($iter976); + $xfer += $output->writeString($iter1011); } } 
$output->writeListEnd(); @@ -26944,15 +27076,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size977 = 0; - $_etype980 = 0; - $xfer += $input->readListBegin($_etype980, $_size977); - for ($_i981 = 0; $_i981 < $_size977; ++$_i981) + $_size1012 = 0; + $_etype1015 = 0; + $xfer += $input->readListBegin($_etype1015, $_size1012); + for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) { - $elem982 = null; - $elem982 = new \metastore\Partition(); - $xfer += $elem982->read($input); - $this->success []= $elem982; + $elem1017 = null; + $elem1017 = new \metastore\Partition(); + $xfer += $elem1017->read($input); + $this->success []= $elem1017; } $xfer += $input->readListEnd(); } else { @@ -26996,9 +27128,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter983) + foreach ($this->success as $iter1018) { - $xfer += $iter983->write($output); + $xfer += $iter1018->write($output); } } $output->writeListEnd(); @@ -27144,14 +27276,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size984 = 0; - $_etype987 = 0; - $xfer += $input->readListBegin($_etype987, $_size984); - for ($_i988 = 0; $_i988 < $_size984; ++$_i988) + $_size1019 = 0; + $_etype1022 = 0; + $xfer += $input->readListBegin($_etype1022, $_size1019); + for ($_i1023 = 0; $_i1023 < $_size1019; ++$_i1023) { - $elem989 = null; - $xfer += $input->readString($elem989); - $this->group_names []= $elem989; + $elem1024 = null; + $xfer += $input->readString($elem1024); + $this->group_names []= $elem1024; } $xfer += $input->readListEnd(); } else { @@ -27199,9 +27331,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter990) + foreach ($this->group_names as $iter1025) { 
- $xfer += $output->writeString($iter990); + $xfer += $output->writeString($iter1025); } } $output->writeListEnd(); @@ -27290,15 +27422,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size991 = 0; - $_etype994 = 0; - $xfer += $input->readListBegin($_etype994, $_size991); - for ($_i995 = 0; $_i995 < $_size991; ++$_i995) + $_size1026 = 0; + $_etype1029 = 0; + $xfer += $input->readListBegin($_etype1029, $_size1026); + for ($_i1030 = 0; $_i1030 < $_size1026; ++$_i1030) { - $elem996 = null; - $elem996 = new \metastore\Partition(); - $xfer += $elem996->read($input); - $this->success []= $elem996; + $elem1031 = null; + $elem1031 = new \metastore\Partition(); + $xfer += $elem1031->read($input); + $this->success []= $elem1031; } $xfer += $input->readListEnd(); } else { @@ -27342,9 +27474,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter997) + foreach ($this->success as $iter1032) { - $xfer += $iter997->write($output); + $xfer += $iter1032->write($output); } } $output->writeListEnd(); @@ -27564,15 +27696,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size998 = 0; - $_etype1001 = 0; - $xfer += $input->readListBegin($_etype1001, $_size998); - for ($_i1002 = 0; $_i1002 < $_size998; ++$_i1002) + $_size1033 = 0; + $_etype1036 = 0; + $xfer += $input->readListBegin($_etype1036, $_size1033); + for ($_i1037 = 0; $_i1037 < $_size1033; ++$_i1037) { - $elem1003 = null; - $elem1003 = new \metastore\PartitionSpec(); - $xfer += $elem1003->read($input); - $this->success []= $elem1003; + $elem1038 = null; + $elem1038 = new \metastore\PartitionSpec(); + $xfer += $elem1038->read($input); + $this->success []= $elem1038; } $xfer += $input->readListEnd(); } else { @@ -27616,9 +27748,9 @@ class 
ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1004) + foreach ($this->success as $iter1039) { - $xfer += $iter1004->write($output); + $xfer += $iter1039->write($output); } } $output->writeListEnd(); @@ -27837,14 +27969,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1005 = 0; - $_etype1008 = 0; - $xfer += $input->readListBegin($_etype1008, $_size1005); - for ($_i1009 = 0; $_i1009 < $_size1005; ++$_i1009) + $_size1040 = 0; + $_etype1043 = 0; + $xfer += $input->readListBegin($_etype1043, $_size1040); + for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) { - $elem1010 = null; - $xfer += $input->readString($elem1010); - $this->success []= $elem1010; + $elem1045 = null; + $xfer += $input->readString($elem1045); + $this->success []= $elem1045; } $xfer += $input->readListEnd(); } else { @@ -27888,9 +28020,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1011) + foreach ($this->success as $iter1046) { - $xfer += $output->writeString($iter1011); + $xfer += $output->writeString($iter1046); } } $output->writeListEnd(); @@ -28221,14 +28353,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1012 = 0; - $_etype1015 = 0; - $xfer += $input->readListBegin($_etype1015, $_size1012); - for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) + $_size1047 = 0; + $_etype1050 = 0; + $xfer += $input->readListBegin($_etype1050, $_size1047); + for ($_i1051 = 0; $_i1051 < $_size1047; ++$_i1051) { - $elem1017 = null; - $xfer += $input->readString($elem1017); - $this->part_vals []= $elem1017; + $elem1052 = null; + $xfer += $input->readString($elem1052); + $this->part_vals []= $elem1052; } $xfer += $input->readListEnd(); } else { 
@@ -28273,9 +28405,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1018) + foreach ($this->part_vals as $iter1053) { - $xfer += $output->writeString($iter1018); + $xfer += $output->writeString($iter1053); } } $output->writeListEnd(); @@ -28369,15 +28501,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1019 = 0; - $_etype1022 = 0; - $xfer += $input->readListBegin($_etype1022, $_size1019); - for ($_i1023 = 0; $_i1023 < $_size1019; ++$_i1023) + $_size1054 = 0; + $_etype1057 = 0; + $xfer += $input->readListBegin($_etype1057, $_size1054); + for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058) { - $elem1024 = null; - $elem1024 = new \metastore\Partition(); - $xfer += $elem1024->read($input); - $this->success []= $elem1024; + $elem1059 = null; + $elem1059 = new \metastore\Partition(); + $xfer += $elem1059->read($input); + $this->success []= $elem1059; } $xfer += $input->readListEnd(); } else { @@ -28421,9 +28553,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1025) + foreach ($this->success as $iter1060) { - $xfer += $iter1025->write($output); + $xfer += $iter1060->write($output); } } $output->writeListEnd(); @@ -28570,14 +28702,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1026 = 0; - $_etype1029 = 0; - $xfer += $input->readListBegin($_etype1029, $_size1026); - for ($_i1030 = 0; $_i1030 < $_size1026; ++$_i1030) + $_size1061 = 0; + $_etype1064 = 0; + $xfer += $input->readListBegin($_etype1064, $_size1061); + for ($_i1065 = 0; $_i1065 < $_size1061; ++$_i1065) { - $elem1031 = null; - $xfer += $input->readString($elem1031); - $this->part_vals []= $elem1031; + $elem1066 = null; + $xfer += 
$input->readString($elem1066); + $this->part_vals []= $elem1066; } $xfer += $input->readListEnd(); } else { @@ -28601,14 +28733,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1032 = 0; - $_etype1035 = 0; - $xfer += $input->readListBegin($_etype1035, $_size1032); - for ($_i1036 = 0; $_i1036 < $_size1032; ++$_i1036) + $_size1067 = 0; + $_etype1070 = 0; + $xfer += $input->readListBegin($_etype1070, $_size1067); + for ($_i1071 = 0; $_i1071 < $_size1067; ++$_i1071) { - $elem1037 = null; - $xfer += $input->readString($elem1037); - $this->group_names []= $elem1037; + $elem1072 = null; + $xfer += $input->readString($elem1072); + $this->group_names []= $elem1072; } $xfer += $input->readListEnd(); } else { @@ -28646,9 +28778,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1038) + foreach ($this->part_vals as $iter1073) { - $xfer += $output->writeString($iter1038); + $xfer += $output->writeString($iter1073); } } $output->writeListEnd(); @@ -28673,9 +28805,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1039) + foreach ($this->group_names as $iter1074) { - $xfer += $output->writeString($iter1039); + $xfer += $output->writeString($iter1074); } } $output->writeListEnd(); @@ -28764,15 +28896,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1040 = 0; - $_etype1043 = 0; - $xfer += $input->readListBegin($_etype1043, $_size1040); - for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) + $_size1075 = 0; + $_etype1078 = 0; + $xfer += $input->readListBegin($_etype1078, $_size1075); + for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) { - $elem1045 = null; - 
$elem1045 = new \metastore\Partition(); - $xfer += $elem1045->read($input); - $this->success []= $elem1045; + $elem1080 = null; + $elem1080 = new \metastore\Partition(); + $xfer += $elem1080->read($input); + $this->success []= $elem1080; } $xfer += $input->readListEnd(); } else { @@ -28816,9 +28948,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1046) + foreach ($this->success as $iter1081) { - $xfer += $iter1046->write($output); + $xfer += $iter1081->write($output); } } $output->writeListEnd(); @@ -28939,14 +29071,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1047 = 0; - $_etype1050 = 0; - $xfer += $input->readListBegin($_etype1050, $_size1047); - for ($_i1051 = 0; $_i1051 < $_size1047; ++$_i1051) + $_size1082 = 0; + $_etype1085 = 0; + $xfer += $input->readListBegin($_etype1085, $_size1082); + for ($_i1086 = 0; $_i1086 < $_size1082; ++$_i1086) { - $elem1052 = null; - $xfer += $input->readString($elem1052); - $this->part_vals []= $elem1052; + $elem1087 = null; + $xfer += $input->readString($elem1087); + $this->part_vals []= $elem1087; } $xfer += $input->readListEnd(); } else { @@ -28991,9 +29123,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1053) + foreach ($this->part_vals as $iter1088) { - $xfer += $output->writeString($iter1053); + $xfer += $output->writeString($iter1088); } } $output->writeListEnd(); @@ -29086,14 +29218,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1054 = 0; - $_etype1057 = 0; - $xfer += $input->readListBegin($_etype1057, $_size1054); - for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058) + $_size1089 = 0; + $_etype1092 = 0; + $xfer += 
$input->readListBegin($_etype1092, $_size1089); + for ($_i1093 = 0; $_i1093 < $_size1089; ++$_i1093) { - $elem1059 = null; - $xfer += $input->readString($elem1059); - $this->success []= $elem1059; + $elem1094 = null; + $xfer += $input->readString($elem1094); + $this->success []= $elem1094; } $xfer += $input->readListEnd(); } else { @@ -29137,9 +29269,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1060) + foreach ($this->success as $iter1095) { - $xfer += $output->writeString($iter1060); + $xfer += $output->writeString($iter1095); } } $output->writeListEnd(); @@ -29382,15 +29514,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1061 = 0; - $_etype1064 = 0; - $xfer += $input->readListBegin($_etype1064, $_size1061); - for ($_i1065 = 0; $_i1065 < $_size1061; ++$_i1065) + $_size1096 = 0; + $_etype1099 = 0; + $xfer += $input->readListBegin($_etype1099, $_size1096); + for ($_i1100 = 0; $_i1100 < $_size1096; ++$_i1100) { - $elem1066 = null; - $elem1066 = new \metastore\Partition(); - $xfer += $elem1066->read($input); - $this->success []= $elem1066; + $elem1101 = null; + $elem1101 = new \metastore\Partition(); + $xfer += $elem1101->read($input); + $this->success []= $elem1101; } $xfer += $input->readListEnd(); } else { @@ -29434,9 +29566,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1067) + foreach ($this->success as $iter1102) { - $xfer += $iter1067->write($output); + $xfer += $iter1102->write($output); } } $output->writeListEnd(); @@ -29679,15 +29811,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1068 = 0; - $_etype1071 = 0; - $xfer += 
$input->readListBegin($_etype1071, $_size1068); - for ($_i1072 = 0; $_i1072 < $_size1068; ++$_i1072) + $_size1103 = 0; + $_etype1106 = 0; + $xfer += $input->readListBegin($_etype1106, $_size1103); + for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) { - $elem1073 = null; - $elem1073 = new \metastore\PartitionSpec(); - $xfer += $elem1073->read($input); - $this->success []= $elem1073; + $elem1108 = null; + $elem1108 = new \metastore\PartitionSpec(); + $xfer += $elem1108->read($input); + $this->success []= $elem1108; } $xfer += $input->readListEnd(); } else { @@ -29731,9 +29863,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1074) + foreach ($this->success as $iter1109) { - $xfer += $iter1074->write($output); + $xfer += $iter1109->write($output); } } $output->writeListEnd(); @@ -30299,14 +30431,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1075 = 0; - $_etype1078 = 0; - $xfer += $input->readListBegin($_etype1078, $_size1075); - for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) + $_size1110 = 0; + $_etype1113 = 0; + $xfer += $input->readListBegin($_etype1113, $_size1110); + for ($_i1114 = 0; $_i1114 < $_size1110; ++$_i1114) { - $elem1080 = null; - $xfer += $input->readString($elem1080); - $this->names []= $elem1080; + $elem1115 = null; + $xfer += $input->readString($elem1115); + $this->names []= $elem1115; } $xfer += $input->readListEnd(); } else { @@ -30344,9 +30476,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1081) + foreach ($this->names as $iter1116) { - $xfer += $output->writeString($iter1081); + $xfer += $output->writeString($iter1116); } } $output->writeListEnd(); @@ -30435,15 +30567,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { 
case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1082 = 0; - $_etype1085 = 0; - $xfer += $input->readListBegin($_etype1085, $_size1082); - for ($_i1086 = 0; $_i1086 < $_size1082; ++$_i1086) + $_size1117 = 0; + $_etype1120 = 0; + $xfer += $input->readListBegin($_etype1120, $_size1117); + for ($_i1121 = 0; $_i1121 < $_size1117; ++$_i1121) { - $elem1087 = null; - $elem1087 = new \metastore\Partition(); - $xfer += $elem1087->read($input); - $this->success []= $elem1087; + $elem1122 = null; + $elem1122 = new \metastore\Partition(); + $xfer += $elem1122->read($input); + $this->success []= $elem1122; } $xfer += $input->readListEnd(); } else { @@ -30487,9 +30619,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1088) + foreach ($this->success as $iter1123) { - $xfer += $iter1088->write($output); + $xfer += $iter1123->write($output); } } $output->writeListEnd(); @@ -30828,15 +30960,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1089 = 0; - $_etype1092 = 0; - $xfer += $input->readListBegin($_etype1092, $_size1089); - for ($_i1093 = 0; $_i1093 < $_size1089; ++$_i1093) + $_size1124 = 0; + $_etype1127 = 0; + $xfer += $input->readListBegin($_etype1127, $_size1124); + for ($_i1128 = 0; $_i1128 < $_size1124; ++$_i1128) { - $elem1094 = null; - $elem1094 = new \metastore\Partition(); - $xfer += $elem1094->read($input); - $this->new_parts []= $elem1094; + $elem1129 = null; + $elem1129 = new \metastore\Partition(); + $xfer += $elem1129->read($input); + $this->new_parts []= $elem1129; } $xfer += $input->readListEnd(); } else { @@ -30874,9 +31006,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1095) + foreach ($this->new_parts as $iter1130) { - $xfer += 
$iter1095->write($output); + $xfer += $iter1130->write($output); } } $output->writeListEnd(); @@ -31091,15 +31223,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1096 = 0; - $_etype1099 = 0; - $xfer += $input->readListBegin($_etype1099, $_size1096); - for ($_i1100 = 0; $_i1100 < $_size1096; ++$_i1100) + $_size1131 = 0; + $_etype1134 = 0; + $xfer += $input->readListBegin($_etype1134, $_size1131); + for ($_i1135 = 0; $_i1135 < $_size1131; ++$_i1135) { - $elem1101 = null; - $elem1101 = new \metastore\Partition(); - $xfer += $elem1101->read($input); - $this->new_parts []= $elem1101; + $elem1136 = null; + $elem1136 = new \metastore\Partition(); + $xfer += $elem1136->read($input); + $this->new_parts []= $elem1136; } $xfer += $input->readListEnd(); } else { @@ -31145,9 +31277,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1102) + foreach ($this->new_parts as $iter1137) { - $xfer += $iter1102->write($output); + $xfer += $iter1137->write($output); } } $output->writeListEnd(); @@ -31625,14 +31757,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1103 = 0; - $_etype1106 = 0; - $xfer += $input->readListBegin($_etype1106, $_size1103); - for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) + $_size1138 = 0; + $_etype1141 = 0; + $xfer += $input->readListBegin($_etype1141, $_size1138); + for ($_i1142 = 0; $_i1142 < $_size1138; ++$_i1142) { - $elem1108 = null; - $xfer += $input->readString($elem1108); - $this->part_vals []= $elem1108; + $elem1143 = null; + $xfer += $input->readString($elem1143); + $this->part_vals []= $elem1143; } $xfer += $input->readListEnd(); } else { @@ -31678,9 +31810,9 @@ class ThriftHiveMetastore_rename_partition_args { { 
$output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1109) + foreach ($this->part_vals as $iter1144) { - $xfer += $output->writeString($iter1109); + $xfer += $output->writeString($iter1144); } } $output->writeListEnd(); @@ -31865,14 +31997,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1110 = 0; - $_etype1113 = 0; - $xfer += $input->readListBegin($_etype1113, $_size1110); - for ($_i1114 = 0; $_i1114 < $_size1110; ++$_i1114) + $_size1145 = 0; + $_etype1148 = 0; + $xfer += $input->readListBegin($_etype1148, $_size1145); + for ($_i1149 = 0; $_i1149 < $_size1145; ++$_i1149) { - $elem1115 = null; - $xfer += $input->readString($elem1115); - $this->part_vals []= $elem1115; + $elem1150 = null; + $xfer += $input->readString($elem1150); + $this->part_vals []= $elem1150; } $xfer += $input->readListEnd(); } else { @@ -31907,9 +32039,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1116) + foreach ($this->part_vals as $iter1151) { - $xfer += $output->writeString($iter1116); + $xfer += $output->writeString($iter1151); } } $output->writeListEnd(); @@ -32363,14 +32495,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1117 = 0; - $_etype1120 = 0; - $xfer += $input->readListBegin($_etype1120, $_size1117); - for ($_i1121 = 0; $_i1121 < $_size1117; ++$_i1121) + $_size1152 = 0; + $_etype1155 = 0; + $xfer += $input->readListBegin($_etype1155, $_size1152); + for ($_i1156 = 0; $_i1156 < $_size1152; ++$_i1156) { - $elem1122 = null; - $xfer += $input->readString($elem1122); - $this->success []= $elem1122; + $elem1157 = null; + $xfer += $input->readString($elem1157); + $this->success []= $elem1157; } $xfer += $input->readListEnd(); } 
else { @@ -32406,9 +32538,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1123) + foreach ($this->success as $iter1158) { - $xfer += $output->writeString($iter1123); + $xfer += $output->writeString($iter1158); } } $output->writeListEnd(); @@ -32568,17 +32700,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1124 = 0; - $_ktype1125 = 0; - $_vtype1126 = 0; - $xfer += $input->readMapBegin($_ktype1125, $_vtype1126, $_size1124); - for ($_i1128 = 0; $_i1128 < $_size1124; ++$_i1128) + $_size1159 = 0; + $_ktype1160 = 0; + $_vtype1161 = 0; + $xfer += $input->readMapBegin($_ktype1160, $_vtype1161, $_size1159); + for ($_i1163 = 0; $_i1163 < $_size1159; ++$_i1163) { - $key1129 = ''; - $val1130 = ''; - $xfer += $input->readString($key1129); - $xfer += $input->readString($val1130); - $this->success[$key1129] = $val1130; + $key1164 = ''; + $val1165 = ''; + $xfer += $input->readString($key1164); + $xfer += $input->readString($val1165); + $this->success[$key1164] = $val1165; } $xfer += $input->readMapEnd(); } else { @@ -32614,10 +32746,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1131 => $viter1132) + foreach ($this->success as $kiter1166 => $viter1167) { - $xfer += $output->writeString($kiter1131); - $xfer += $output->writeString($viter1132); + $xfer += $output->writeString($kiter1166); + $xfer += $output->writeString($viter1167); } } $output->writeMapEnd(); @@ -32737,17 +32869,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1133 = 0; - $_ktype1134 = 0; - $_vtype1135 = 0; - $xfer += $input->readMapBegin($_ktype1134, $_vtype1135, $_size1133); - for ($_i1137 = 0; $_i1137 
< $_size1133; ++$_i1137) + $_size1168 = 0; + $_ktype1169 = 0; + $_vtype1170 = 0; + $xfer += $input->readMapBegin($_ktype1169, $_vtype1170, $_size1168); + for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) { - $key1138 = ''; - $val1139 = ''; - $xfer += $input->readString($key1138); - $xfer += $input->readString($val1139); - $this->part_vals[$key1138] = $val1139; + $key1173 = ''; + $val1174 = ''; + $xfer += $input->readString($key1173); + $xfer += $input->readString($val1174); + $this->part_vals[$key1173] = $val1174; } $xfer += $input->readMapEnd(); } else { @@ -32792,10 +32924,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1140 => $viter1141) + foreach ($this->part_vals as $kiter1175 => $viter1176) { - $xfer += $output->writeString($kiter1140); - $xfer += $output->writeString($viter1141); + $xfer += $output->writeString($kiter1175); + $xfer += $output->writeString($viter1176); } } $output->writeMapEnd(); @@ -33117,17 +33249,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1142 = 0; - $_ktype1143 = 0; - $_vtype1144 = 0; - $xfer += $input->readMapBegin($_ktype1143, $_vtype1144, $_size1142); - for ($_i1146 = 0; $_i1146 < $_size1142; ++$_i1146) + $_size1177 = 0; + $_ktype1178 = 0; + $_vtype1179 = 0; + $xfer += $input->readMapBegin($_ktype1178, $_vtype1179, $_size1177); + for ($_i1181 = 0; $_i1181 < $_size1177; ++$_i1181) { - $key1147 = ''; - $val1148 = ''; - $xfer += $input->readString($key1147); - $xfer += $input->readString($val1148); - $this->part_vals[$key1147] = $val1148; + $key1182 = ''; + $val1183 = ''; + $xfer += $input->readString($key1182); + $xfer += $input->readString($val1183); + $this->part_vals[$key1182] = $val1183; } $xfer += $input->readMapEnd(); } else { @@ -33172,10 +33304,10 @@ class 
ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1149 => $viter1150) + foreach ($this->part_vals as $kiter1184 => $viter1185) { - $xfer += $output->writeString($kiter1149); - $xfer += $output->writeString($viter1150); + $xfer += $output->writeString($kiter1184); + $xfer += $output->writeString($viter1185); } } $output->writeMapEnd(); @@ -34649,15 +34781,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1151 = 0; - $_etype1154 = 0; - $xfer += $input->readListBegin($_etype1154, $_size1151); - for ($_i1155 = 0; $_i1155 < $_size1151; ++$_i1155) + $_size1186 = 0; + $_etype1189 = 0; + $xfer += $input->readListBegin($_etype1189, $_size1186); + for ($_i1190 = 0; $_i1190 < $_size1186; ++$_i1190) { - $elem1156 = null; - $elem1156 = new \metastore\Index(); - $xfer += $elem1156->read($input); - $this->success []= $elem1156; + $elem1191 = null; + $elem1191 = new \metastore\Index(); + $xfer += $elem1191->read($input); + $this->success []= $elem1191; } $xfer += $input->readListEnd(); } else { @@ -34701,9 +34833,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1157) + foreach ($this->success as $iter1192) { - $xfer += $iter1157->write($output); + $xfer += $iter1192->write($output); } } $output->writeListEnd(); @@ -34910,14 +35042,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1158 = 0; - $_etype1161 = 0; - $xfer += $input->readListBegin($_etype1161, $_size1158); - for ($_i1162 = 0; $_i1162 < $_size1158; ++$_i1162) + $_size1193 = 0; + $_etype1196 = 0; + $xfer += $input->readListBegin($_etype1196, $_size1193); + for ($_i1197 = 0; $_i1197 < $_size1193; ++$_i1197) { - $elem1163 = null; - $xfer += 
$input->readString($elem1163); - $this->success []= $elem1163; + $elem1198 = null; + $xfer += $input->readString($elem1198); + $this->success []= $elem1198; } $xfer += $input->readListEnd(); } else { @@ -34953,9 +35085,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1164) + foreach ($this->success as $iter1199) { - $xfer += $output->writeString($iter1164); + $xfer += $output->writeString($iter1199); } } $output->writeListEnd(); @@ -39269,14 +39401,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1165 = 0; - $_etype1168 = 0; - $xfer += $input->readListBegin($_etype1168, $_size1165); - for ($_i1169 = 0; $_i1169 < $_size1165; ++$_i1169) + $_size1200 = 0; + $_etype1203 = 0; + $xfer += $input->readListBegin($_etype1203, $_size1200); + for ($_i1204 = 0; $_i1204 < $_size1200; ++$_i1204) { - $elem1170 = null; - $xfer += $input->readString($elem1170); - $this->success []= $elem1170; + $elem1205 = null; + $xfer += $input->readString($elem1205); + $this->success []= $elem1205; } $xfer += $input->readListEnd(); } else { @@ -39312,9 +39444,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1171) + foreach ($this->success as $iter1206) { - $xfer += $output->writeString($iter1171); + $xfer += $output->writeString($iter1206); } } $output->writeListEnd(); @@ -40183,14 +40315,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1172 = 0; - $_etype1175 = 0; - $xfer += $input->readListBegin($_etype1175, $_size1172); - for ($_i1176 = 0; $_i1176 < $_size1172; ++$_i1176) + $_size1207 = 0; + $_etype1210 = 0; + $xfer += $input->readListBegin($_etype1210, $_size1207); + for ($_i1211 = 0; $_i1211 < $_size1207; ++$_i1211) { - $elem1177 = 
null; - $xfer += $input->readString($elem1177); - $this->success []= $elem1177; + $elem1212 = null; + $xfer += $input->readString($elem1212); + $this->success []= $elem1212; } $xfer += $input->readListEnd(); } else { @@ -40226,9 +40358,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1178) + foreach ($this->success as $iter1213) { - $xfer += $output->writeString($iter1178); + $xfer += $output->writeString($iter1213); } } $output->writeListEnd(); @@ -40919,15 +41051,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1179 = 0; - $_etype1182 = 0; - $xfer += $input->readListBegin($_etype1182, $_size1179); - for ($_i1183 = 0; $_i1183 < $_size1179; ++$_i1183) + $_size1214 = 0; + $_etype1217 = 0; + $xfer += $input->readListBegin($_etype1217, $_size1214); + for ($_i1218 = 0; $_i1218 < $_size1214; ++$_i1218) { - $elem1184 = null; - $elem1184 = new \metastore\Role(); - $xfer += $elem1184->read($input); - $this->success []= $elem1184; + $elem1219 = null; + $elem1219 = new \metastore\Role(); + $xfer += $elem1219->read($input); + $this->success []= $elem1219; } $xfer += $input->readListEnd(); } else { @@ -40963,9 +41095,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1185) + foreach ($this->success as $iter1220) { - $xfer += $iter1185->write($output); + $xfer += $iter1220->write($output); } } $output->writeListEnd(); @@ -41627,14 +41759,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1186 = 0; - $_etype1189 = 0; - $xfer += $input->readListBegin($_etype1189, $_size1186); - for ($_i1190 = 0; $_i1190 < $_size1186; ++$_i1190) + $_size1221 = 0; + $_etype1224 = 0; + $xfer += $input->readListBegin($_etype1224, $_size1221); + 
for ($_i1225 = 0; $_i1225 < $_size1221; ++$_i1225) { - $elem1191 = null; - $xfer += $input->readString($elem1191); - $this->group_names []= $elem1191; + $elem1226 = null; + $xfer += $input->readString($elem1226); + $this->group_names []= $elem1226; } $xfer += $input->readListEnd(); } else { @@ -41675,9 +41807,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1192) + foreach ($this->group_names as $iter1227) { - $xfer += $output->writeString($iter1192); + $xfer += $output->writeString($iter1227); } } $output->writeListEnd(); @@ -41985,15 +42117,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1193 = 0; - $_etype1196 = 0; - $xfer += $input->readListBegin($_etype1196, $_size1193); - for ($_i1197 = 0; $_i1197 < $_size1193; ++$_i1197) + $_size1228 = 0; + $_etype1231 = 0; + $xfer += $input->readListBegin($_etype1231, $_size1228); + for ($_i1232 = 0; $_i1232 < $_size1228; ++$_i1232) { - $elem1198 = null; - $elem1198 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1198->read($input); - $this->success []= $elem1198; + $elem1233 = null; + $elem1233 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1233->read($input); + $this->success []= $elem1233; } $xfer += $input->readListEnd(); } else { @@ -42029,9 +42161,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1199) + foreach ($this->success as $iter1234) { - $xfer += $iter1199->write($output); + $xfer += $iter1234->write($output); } } $output->writeListEnd(); @@ -42663,14 +42795,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1200 = 0; - $_etype1203 = 0; - $xfer += $input->readListBegin($_etype1203, $_size1200); - for ($_i1204 = 0; $_i1204 < 
$_size1200; ++$_i1204) + $_size1235 = 0; + $_etype1238 = 0; + $xfer += $input->readListBegin($_etype1238, $_size1235); + for ($_i1239 = 0; $_i1239 < $_size1235; ++$_i1239) { - $elem1205 = null; - $xfer += $input->readString($elem1205); - $this->group_names []= $elem1205; + $elem1240 = null; + $xfer += $input->readString($elem1240); + $this->group_names []= $elem1240; } $xfer += $input->readListEnd(); } else { @@ -42703,9 +42835,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1206) + foreach ($this->group_names as $iter1241) { - $xfer += $output->writeString($iter1206); + $xfer += $output->writeString($iter1241); } } $output->writeListEnd(); @@ -42781,14 +42913,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1207 = 0; - $_etype1210 = 0; - $xfer += $input->readListBegin($_etype1210, $_size1207); - for ($_i1211 = 0; $_i1211 < $_size1207; ++$_i1211) + $_size1242 = 0; + $_etype1245 = 0; + $xfer += $input->readListBegin($_etype1245, $_size1242); + for ($_i1246 = 0; $_i1246 < $_size1242; ++$_i1246) { - $elem1212 = null; - $xfer += $input->readString($elem1212); - $this->success []= $elem1212; + $elem1247 = null; + $xfer += $input->readString($elem1247); + $this->success []= $elem1247; } $xfer += $input->readListEnd(); } else { @@ -42824,9 +42956,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1213) + foreach ($this->success as $iter1248) { - $xfer += $output->writeString($iter1213); + $xfer += $output->writeString($iter1248); } } $output->writeListEnd(); @@ -43943,14 +44075,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1214 = 0; - $_etype1217 = 0; - $xfer += $input->readListBegin($_etype1217, $_size1214); - 
for ($_i1218 = 0; $_i1218 < $_size1214; ++$_i1218) + $_size1249 = 0; + $_etype1252 = 0; + $xfer += $input->readListBegin($_etype1252, $_size1249); + for ($_i1253 = 0; $_i1253 < $_size1249; ++$_i1253) { - $elem1219 = null; - $xfer += $input->readString($elem1219); - $this->success []= $elem1219; + $elem1254 = null; + $xfer += $input->readString($elem1254); + $this->success []= $elem1254; } $xfer += $input->readListEnd(); } else { @@ -43978,9 +44110,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1220) + foreach ($this->success as $iter1255) { - $xfer += $output->writeString($iter1220); + $xfer += $output->writeString($iter1255); } } $output->writeListEnd(); @@ -44619,14 +44751,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1221 = 0; - $_etype1224 = 0; - $xfer += $input->readListBegin($_etype1224, $_size1221); - for ($_i1225 = 0; $_i1225 < $_size1221; ++$_i1225) + $_size1256 = 0; + $_etype1259 = 0; + $xfer += $input->readListBegin($_etype1259, $_size1256); + for ($_i1260 = 0; $_i1260 < $_size1256; ++$_i1260) { - $elem1226 = null; - $xfer += $input->readString($elem1226); - $this->success []= $elem1226; + $elem1261 = null; + $xfer += $input->readString($elem1261); + $this->success []= $elem1261; } $xfer += $input->readListEnd(); } else { @@ -44654,9 +44786,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1227) + foreach ($this->success as $iter1262) { - $xfer += $output->writeString($iter1227); + $xfer += $output->writeString($iter1262); } } $output->writeListEnd(); @@ -45586,6 +45718,451 @@ class ThriftHiveMetastore_commit_txn_result { } +class ThriftHiveMetastore_get_open_write_ids_args { + static $_TSPEC; + + /** + * @var \metastore\GetOpenWriteIdsRequest + */ + 
public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetOpenWriteIdsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_open_write_ids_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\GetOpenWriteIdsRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_open_write_ids_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_open_write_ids_result { + static $_TSPEC; + + /** + * @var \metastore\GetOpenWriteIdsResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchTxnException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 
0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetOpenWriteIdsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchTxnException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_open_write_ids_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetOpenWriteIdsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchTxnException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_open_write_ids_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += 
$output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_allocate_table_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\AllocateTableWriteIdRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\AllocateTableWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_allocate_table_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\AllocateTableWriteIdRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_allocate_table_write_id_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + 
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_allocate_table_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\AllocateTableWriteIdResponse + */ + public $success = null; + /** + * @var \metastore\NoSuchTxnException + */ + public $o1 = null; + /** + * @var \metastore\TxnAbortedException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\AllocateTableWriteIdResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchTxnException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\TxnAbortedException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_allocate_table_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new 
\metastore\AllocateTableWriteIdResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchTxnException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\TxnAbortedException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_allocate_table_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_lock_args { static $_TSPEC; diff --git 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index a5b578e..5a79892 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -14336,6 +14336,774 @@ class CommitTxnRequest { } +class GetOpenWriteIdsRequest { + static $_TSPEC; + + /** + * @var string[] + */ + public $tableNames = null; + /** + * @var string + */ + public $validTxnStr = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'tableNames', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 2 => array( + 'var' => 'validTxnStr', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['tableNames'])) { + $this->tableNames = $vals['tableNames']; + } + if (isset($vals['validTxnStr'])) { + $this->validTxnStr = $vals['validTxnStr']; + } + } + } + + public function getName() { + return 'GetOpenWriteIdsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->tableNames = array(); + $_size490 = 0; + $_etype493 = 0; + $xfer += $input->readListBegin($_etype493, $_size490); + for ($_i494 = 0; $_i494 < $_size490; ++$_i494) + { + $elem495 = null; + $xfer += $input->readString($elem495); + $this->tableNames []= $elem495; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnStr); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += 
$input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetOpenWriteIdsRequest'); + if ($this->tableNames !== null) { + if (!is_array($this->tableNames)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('tableNames', TType::LST, 1); + { + $output->writeListBegin(TType::STRING, count($this->tableNames)); + { + foreach ($this->tableNames as $iter496) + { + $xfer += $output->writeString($iter496); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->validTxnStr !== null) { + $xfer += $output->writeFieldBegin('validTxnStr', TType::STRING, 2); + $xfer += $output->writeString($this->validTxnStr); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class OpenWriteIds { + static $_TSPEC; + + /** + * @var string + */ + public $tableName = null; + /** + * @var int + */ + public $writeIdHighWaterMark = null; + /** + * @var int[] + */ + public $openWriteIds = null; + /** + * @var int + */ + public $minWriteId = null; + /** + * @var string + */ + public $abortedBits = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'writeIdHighWaterMark', + 'type' => TType::I64, + ), + 3 => array( + 'var' => 'openWriteIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 4 => array( + 'var' => 'minWriteId', + 'type' => TType::I64, + ), + 5 => array( + 'var' => 'abortedBits', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; 
+ } + if (isset($vals['writeIdHighWaterMark'])) { + $this->writeIdHighWaterMark = $vals['writeIdHighWaterMark']; + } + if (isset($vals['openWriteIds'])) { + $this->openWriteIds = $vals['openWriteIds']; + } + if (isset($vals['minWriteId'])) { + $this->minWriteId = $vals['minWriteId']; + } + if (isset($vals['abortedBits'])) { + $this->abortedBits = $vals['abortedBits']; + } + } + } + + public function getName() { + return 'OpenWriteIds'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeIdHighWaterMark); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->openWriteIds = array(); + $_size497 = 0; + $_etype500 = 0; + $xfer += $input->readListBegin($_etype500, $_size497); + for ($_i501 = 0; $_i501 < $_size497; ++$_i501) + { + $elem502 = null; + $xfer += $input->readI64($elem502); + $this->openWriteIds []= $elem502; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->minWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->abortedBits); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('OpenWriteIds'); + if 
($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 1); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeIdHighWaterMark !== null) { + $xfer += $output->writeFieldBegin('writeIdHighWaterMark', TType::I64, 2); + $xfer += $output->writeI64($this->writeIdHighWaterMark); + $xfer += $output->writeFieldEnd(); + } + if ($this->openWriteIds !== null) { + if (!is_array($this->openWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('openWriteIds', TType::LST, 3); + { + $output->writeListBegin(TType::I64, count($this->openWriteIds)); + { + foreach ($this->openWriteIds as $iter503) + { + $xfer += $output->writeI64($iter503); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->minWriteId !== null) { + $xfer += $output->writeFieldBegin('minWriteId', TType::I64, 4); + $xfer += $output->writeI64($this->minWriteId); + $xfer += $output->writeFieldEnd(); + } + if ($this->abortedBits !== null) { + $xfer += $output->writeFieldBegin('abortedBits', TType::STRING, 5); + $xfer += $output->writeString($this->abortedBits); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetOpenWriteIdsResponse { + static $_TSPEC; + + /** + * @var \metastore\OpenWriteIds[] + */ + public $openWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'openWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\OpenWriteIds', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['openWriteIds'])) { + $this->openWriteIds = $vals['openWriteIds']; + } + } + } + + public function getName() { + return 
'GetOpenWriteIdsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->openWriteIds = array(); + $_size504 = 0; + $_etype507 = 0; + $xfer += $input->readListBegin($_etype507, $_size504); + for ($_i508 = 0; $_i508 < $_size504; ++$_i508) + { + $elem509 = null; + $elem509 = new \metastore\OpenWriteIds(); + $xfer += $elem509->read($input); + $this->openWriteIds []= $elem509; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetOpenWriteIdsResponse'); + if ($this->openWriteIds !== null) { + if (!is_array($this->openWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('openWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->openWriteIds)); + { + foreach ($this->openWriteIds as $iter510) + { + $xfer += $iter510->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AllocateTableWriteIdRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $txnIds = null; + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnIds', + 'type' => TType::LST, + 'etype' => 
TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 2 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnIds'])) { + $this->txnIds = $vals['txnIds']; + } + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + } + } + + public function getName() { + return 'AllocateTableWriteIdRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->txnIds = array(); + $_size511 = 0; + $_etype514 = 0; + $xfer += $input->readListBegin($_etype514, $_size511); + for ($_i515 = 0; $_i515 < $_size511; ++$_i515) + { + $elem516 = null; + $xfer += $input->readI64($elem516); + $this->txnIds []= $elem516; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AllocateTableWriteIdRequest'); + if ($this->txnIds !== null) { + if (!is_array($this->txnIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('txnIds', TType::LST, 
1); + { + $output->writeListBegin(TType::I64, count($this->txnIds)); + { + foreach ($this->txnIds as $iter517) + { + $xfer += $output->writeI64($iter517); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TxnToWriteId { + static $_TSPEC; + + /** + * @var int + */ + public $txnId = null; + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'TxnToWriteId'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += 
$input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TxnToWriteId'); + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 1); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 2); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AllocateTableWriteIdResponse { + static $_TSPEC; + + /** + * @var \metastore\TxnToWriteId[] + */ + public $txnToWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnToWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\TxnToWriteId', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnToWriteIds'])) { + $this->txnToWriteIds = $vals['txnToWriteIds']; + } + } + } + + public function getName() { + return 'AllocateTableWriteIdResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->txnToWriteIds = array(); + $_size518 = 0; + $_etype521 = 0; + $xfer += $input->readListBegin($_etype521, $_size518); + for ($_i522 = 0; $_i522 < $_size518; ++$_i522) + { + $elem523 = null; + $elem523 = new \metastore\TxnToWriteId(); + $xfer += $elem523->read($input); + $this->txnToWriteIds []= $elem523; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += 
$input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AllocateTableWriteIdResponse'); + if ($this->txnToWriteIds !== null) { + if (!is_array($this->txnToWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('txnToWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); + { + foreach ($this->txnToWriteIds as $iter524) + { + $xfer += $iter524->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class LockComponent { static $_TSPEC; @@ -14667,15 +15435,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size490 = 0; - $_etype493 = 0; - $xfer += $input->readListBegin($_etype493, $_size490); - for ($_i494 = 0; $_i494 < $_size490; ++$_i494) + $_size525 = 0; + $_etype528 = 0; + $xfer += $input->readListBegin($_etype528, $_size525); + for ($_i529 = 0; $_i529 < $_size525; ++$_i529) { - $elem495 = null; - $elem495 = new \metastore\LockComponent(); - $xfer += $elem495->read($input); - $this->component []= $elem495; + $elem530 = null; + $elem530 = new \metastore\LockComponent(); + $xfer += $elem530->read($input); + $this->component []= $elem530; } $xfer += $input->readListEnd(); } else { @@ -14731,9 +15499,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter496) + foreach ($this->component as $iter531) { - $xfer += $iter496->write($output); + $xfer += $iter531->write($output); } } $output->writeListEnd(); @@ -15676,15 +16444,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - 
$_size497 = 0; - $_etype500 = 0; - $xfer += $input->readListBegin($_etype500, $_size497); - for ($_i501 = 0; $_i501 < $_size497; ++$_i501) + $_size532 = 0; + $_etype535 = 0; + $xfer += $input->readListBegin($_etype535, $_size532); + for ($_i536 = 0; $_i536 < $_size532; ++$_i536) { - $elem502 = null; - $elem502 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem502->read($input); - $this->locks []= $elem502; + $elem537 = null; + $elem537 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem537->read($input); + $this->locks []= $elem537; } $xfer += $input->readListEnd(); } else { @@ -15712,9 +16480,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter503) + foreach ($this->locks as $iter538) { - $xfer += $iter503->write($output); + $xfer += $iter538->write($output); } } $output->writeListEnd(); @@ -15989,17 +16757,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size504 = 0; - $_etype507 = 0; - $xfer += $input->readSetBegin($_etype507, $_size504); - for ($_i508 = 0; $_i508 < $_size504; ++$_i508) + $_size539 = 0; + $_etype542 = 0; + $xfer += $input->readSetBegin($_etype542, $_size539); + for ($_i543 = 0; $_i543 < $_size539; ++$_i543) { - $elem509 = null; - $xfer += $input->readI64($elem509); - if (is_scalar($elem509)) { - $this->aborted[$elem509] = true; + $elem544 = null; + $xfer += $input->readI64($elem544); + if (is_scalar($elem544)) { + $this->aborted[$elem544] = true; } else { - $this->aborted []= $elem509; + $this->aborted []= $elem544; } } $xfer += $input->readSetEnd(); @@ -16010,17 +16778,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size510 = 0; - $_etype513 = 0; - $xfer += $input->readSetBegin($_etype513, $_size510); - for ($_i514 = 0; $_i514 < $_size510; ++$_i514) + $_size545 = 0; + $_etype548 = 0; + $xfer += $input->readSetBegin($_etype548, 
$_size545); + for ($_i549 = 0; $_i549 < $_size545; ++$_i549) { - $elem515 = null; - $xfer += $input->readI64($elem515); - if (is_scalar($elem515)) { - $this->nosuch[$elem515] = true; + $elem550 = null; + $xfer += $input->readI64($elem550); + if (is_scalar($elem550)) { + $this->nosuch[$elem550] = true; } else { - $this->nosuch []= $elem515; + $this->nosuch []= $elem550; } } $xfer += $input->readSetEnd(); @@ -16049,12 +16817,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter516 => $iter517) + foreach ($this->aborted as $iter551 => $iter552) { - if (is_scalar($iter517)) { - $xfer += $output->writeI64($iter516); + if (is_scalar($iter552)) { + $xfer += $output->writeI64($iter551); } else { - $xfer += $output->writeI64($iter517); + $xfer += $output->writeI64($iter552); } } } @@ -16070,12 +16838,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter518 => $iter519) + foreach ($this->nosuch as $iter553 => $iter554) { - if (is_scalar($iter519)) { - $xfer += $output->writeI64($iter518); + if (is_scalar($iter554)) { + $xfer += $output->writeI64($iter553); } else { - $xfer += $output->writeI64($iter519); + $xfer += $output->writeI64($iter554); } } } @@ -16234,17 +17002,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size520 = 0; - $_ktype521 = 0; - $_vtype522 = 0; - $xfer += $input->readMapBegin($_ktype521, $_vtype522, $_size520); - for ($_i524 = 0; $_i524 < $_size520; ++$_i524) + $_size555 = 0; + $_ktype556 = 0; + $_vtype557 = 0; + $xfer += $input->readMapBegin($_ktype556, $_vtype557, $_size555); + for ($_i559 = 0; $_i559 < $_size555; ++$_i559) { - $key525 = ''; - $val526 = ''; - $xfer += $input->readString($key525); - $xfer += $input->readString($val526); - $this->properties[$key525] = $val526; + $key560 = ''; + $val561 = ''; + $xfer += 
$input->readString($key560); + $xfer += $input->readString($val561); + $this->properties[$key560] = $val561; } $xfer += $input->readMapEnd(); } else { @@ -16297,10 +17065,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter527 => $viter528) + foreach ($this->properties as $kiter562 => $viter563) { - $xfer += $output->writeString($kiter527); - $xfer += $output->writeString($viter528); + $xfer += $output->writeString($kiter562); + $xfer += $output->writeString($viter563); } } $output->writeMapEnd(); @@ -16887,15 +17655,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size529 = 0; - $_etype532 = 0; - $xfer += $input->readListBegin($_etype532, $_size529); - for ($_i533 = 0; $_i533 < $_size529; ++$_i533) + $_size564 = 0; + $_etype567 = 0; + $xfer += $input->readListBegin($_etype567, $_size564); + for ($_i568 = 0; $_i568 < $_size564; ++$_i568) { - $elem534 = null; - $elem534 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem534->read($input); - $this->compacts []= $elem534; + $elem569 = null; + $elem569 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem569->read($input); + $this->compacts []= $elem569; } $xfer += $input->readListEnd(); } else { @@ -16923,9 +17691,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter535) + foreach ($this->compacts as $iter570) { - $xfer += $iter535->write($output); + $xfer += $iter570->write($output); } } $output->writeListEnd(); @@ -17054,14 +17822,14 @@ class AddDynamicPartitions { case 4: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size536 = 0; - $_etype539 = 0; - $xfer += $input->readListBegin($_etype539, $_size536); - for ($_i540 = 0; $_i540 < $_size536; ++$_i540) + $_size571 = 0; + $_etype574 = 0; + $xfer += $input->readListBegin($_etype574, $_size571); 
+ for ($_i575 = 0; $_i575 < $_size571; ++$_i575) { - $elem541 = null; - $xfer += $input->readString($elem541); - $this->partitionnames []= $elem541; + $elem576 = null; + $xfer += $input->readString($elem576); + $this->partitionnames []= $elem576; } $xfer += $input->readListEnd(); } else { @@ -17111,9 +17879,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter542) + foreach ($this->partitionnames as $iter577) { - $xfer += $output->writeString($iter542); + $xfer += $output->writeString($iter577); } } $output->writeListEnd(); @@ -17419,17 +18187,17 @@ class CreationMetadata { case 3: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size543 = 0; - $_etype546 = 0; - $xfer += $input->readSetBegin($_etype546, $_size543); - for ($_i547 = 0; $_i547 < $_size543; ++$_i547) + $_size578 = 0; + $_etype581 = 0; + $xfer += $input->readSetBegin($_etype581, $_size578); + for ($_i582 = 0; $_i582 < $_size578; ++$_i582) { - $elem548 = null; - $xfer += $input->readString($elem548); - if (is_scalar($elem548)) { - $this->tablesUsed[$elem548] = true; + $elem583 = null; + $xfer += $input->readString($elem583); + if (is_scalar($elem583)) { + $this->tablesUsed[$elem583] = true; } else { - $this->tablesUsed []= $elem548; + $this->tablesUsed []= $elem583; } } $xfer += $input->readSetEnd(); @@ -17475,12 +18243,12 @@ class CreationMetadata { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter549 => $iter550) + foreach ($this->tablesUsed as $iter584 => $iter585) { - if (is_scalar($iter550)) { - $xfer += $output->writeString($iter549); + if (is_scalar($iter585)) { + $xfer += $output->writeString($iter584); } else { - $xfer += $output->writeString($iter550); + $xfer += $output->writeString($iter585); } } } @@ -17862,15 +18630,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size551 = 
0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - for ($_i555 = 0; $_i555 < $_size551; ++$_i555) + $_size586 = 0; + $_etype589 = 0; + $xfer += $input->readListBegin($_etype589, $_size586); + for ($_i590 = 0; $_i590 < $_size586; ++$_i590) { - $elem556 = null; - $elem556 = new \metastore\NotificationEvent(); - $xfer += $elem556->read($input); - $this->events []= $elem556; + $elem591 = null; + $elem591 = new \metastore\NotificationEvent(); + $xfer += $elem591->read($input); + $this->events []= $elem591; } $xfer += $input->readListEnd(); } else { @@ -17898,9 +18666,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter557) + foreach ($this->events as $iter592) { - $xfer += $iter557->write($output); + $xfer += $iter592->write($output); } } $output->writeListEnd(); @@ -18245,14 +19013,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readListBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size593 = 0; + $_etype596 = 0; + $xfer += $input->readListBegin($_etype596, $_size593); + for ($_i597 = 0; $_i597 < $_size593; ++$_i597) { - $elem563 = null; - $xfer += $input->readString($elem563); - $this->filesAdded []= $elem563; + $elem598 = null; + $xfer += $input->readString($elem598); + $this->filesAdded []= $elem598; } $xfer += $input->readListEnd(); } else { @@ -18262,14 +19030,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size564 = 0; - $_etype567 = 0; - $xfer += $input->readListBegin($_etype567, $_size564); - for ($_i568 = 0; $_i568 < $_size564; ++$_i568) + $_size599 = 0; + $_etype602 = 0; + $xfer += $input->readListBegin($_etype602, $_size599); + for ($_i603 = 0; $_i603 < $_size599; ++$_i603) { - $elem569 = null; - $xfer += $input->readString($elem569); - 
$this->filesAddedChecksum []= $elem569; + $elem604 = null; + $xfer += $input->readString($elem604); + $this->filesAddedChecksum []= $elem604; } $xfer += $input->readListEnd(); } else { @@ -18302,9 +19070,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter570) + foreach ($this->filesAdded as $iter605) { - $xfer += $output->writeString($iter570); + $xfer += $output->writeString($iter605); } } $output->writeListEnd(); @@ -18319,9 +19087,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter571) + foreach ($this->filesAddedChecksum as $iter606) { - $xfer += $output->writeString($iter571); + $xfer += $output->writeString($iter606); } } $output->writeListEnd(); @@ -18539,14 +19307,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size572 = 0; - $_etype575 = 0; - $xfer += $input->readListBegin($_etype575, $_size572); - for ($_i576 = 0; $_i576 < $_size572; ++$_i576) + $_size607 = 0; + $_etype610 = 0; + $xfer += $input->readListBegin($_etype610, $_size607); + for ($_i611 = 0; $_i611 < $_size607; ++$_i611) { - $elem577 = null; - $xfer += $input->readString($elem577); - $this->partitionVals []= $elem577; + $elem612 = null; + $xfer += $input->readString($elem612); + $this->partitionVals []= $elem612; } $xfer += $input->readListEnd(); } else { @@ -18597,9 +19365,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter578) + foreach ($this->partitionVals as $iter613) { - $xfer += $output->writeString($iter578); + $xfer += $output->writeString($iter613); } } $output->writeListEnd(); @@ -18827,18 +19595,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size579 = 0; - $_ktype580 = 0; - 
$_vtype581 = 0; - $xfer += $input->readMapBegin($_ktype580, $_vtype581, $_size579); - for ($_i583 = 0; $_i583 < $_size579; ++$_i583) + $_size614 = 0; + $_ktype615 = 0; + $_vtype616 = 0; + $xfer += $input->readMapBegin($_ktype615, $_vtype616, $_size614); + for ($_i618 = 0; $_i618 < $_size614; ++$_i618) { - $key584 = 0; - $val585 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key584); - $val585 = new \metastore\MetadataPpdResult(); - $xfer += $val585->read($input); - $this->metadata[$key584] = $val585; + $key619 = 0; + $val620 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key619); + $val620 = new \metastore\MetadataPpdResult(); + $xfer += $val620->read($input); + $this->metadata[$key619] = $val620; } $xfer += $input->readMapEnd(); } else { @@ -18873,10 +19641,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter586 => $viter587) + foreach ($this->metadata as $kiter621 => $viter622) { - $xfer += $output->writeI64($kiter586); - $xfer += $viter587->write($output); + $xfer += $output->writeI64($kiter621); + $xfer += $viter622->write($output); } } $output->writeMapEnd(); @@ -18978,14 +19746,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size588 = 0; - $_etype591 = 0; - $xfer += $input->readListBegin($_etype591, $_size588); - for ($_i592 = 0; $_i592 < $_size588; ++$_i592) + $_size623 = 0; + $_etype626 = 0; + $xfer += $input->readListBegin($_etype626, $_size623); + for ($_i627 = 0; $_i627 < $_size623; ++$_i627) { - $elem593 = null; - $xfer += $input->readI64($elem593); - $this->fileIds []= $elem593; + $elem628 = null; + $xfer += $input->readI64($elem628); + $this->fileIds []= $elem628; } $xfer += $input->readListEnd(); } else { @@ -19034,9 +19802,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach 
($this->fileIds as $iter594) + foreach ($this->fileIds as $iter629) { - $xfer += $output->writeI64($iter594); + $xfer += $output->writeI64($iter629); } } $output->writeListEnd(); @@ -19130,17 +19898,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size595 = 0; - $_ktype596 = 0; - $_vtype597 = 0; - $xfer += $input->readMapBegin($_ktype596, $_vtype597, $_size595); - for ($_i599 = 0; $_i599 < $_size595; ++$_i599) + $_size630 = 0; + $_ktype631 = 0; + $_vtype632 = 0; + $xfer += $input->readMapBegin($_ktype631, $_vtype632, $_size630); + for ($_i634 = 0; $_i634 < $_size630; ++$_i634) { - $key600 = 0; - $val601 = ''; - $xfer += $input->readI64($key600); - $xfer += $input->readString($val601); - $this->metadata[$key600] = $val601; + $key635 = 0; + $val636 = ''; + $xfer += $input->readI64($key635); + $xfer += $input->readString($val636); + $this->metadata[$key635] = $val636; } $xfer += $input->readMapEnd(); } else { @@ -19175,10 +19943,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter602 => $viter603) + foreach ($this->metadata as $kiter637 => $viter638) { - $xfer += $output->writeI64($kiter602); - $xfer += $output->writeString($viter603); + $xfer += $output->writeI64($kiter637); + $xfer += $output->writeString($viter638); } } $output->writeMapEnd(); @@ -19247,14 +20015,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size604 = 0; - $_etype607 = 0; - $xfer += $input->readListBegin($_etype607, $_size604); - for ($_i608 = 0; $_i608 < $_size604; ++$_i608) + $_size639 = 0; + $_etype642 = 0; + $xfer += $input->readListBegin($_etype642, $_size639); + for ($_i643 = 0; $_i643 < $_size639; ++$_i643) { - $elem609 = null; - $xfer += $input->readI64($elem609); - $this->fileIds []= $elem609; + $elem644 = null; + $xfer += $input->readI64($elem644); + $this->fileIds []= $elem644; } 
$xfer += $input->readListEnd(); } else { @@ -19282,9 +20050,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter610) + foreach ($this->fileIds as $iter645) { - $xfer += $output->writeI64($iter610); + $xfer += $output->writeI64($iter645); } } $output->writeListEnd(); @@ -19424,14 +20192,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size611 = 0; - $_etype614 = 0; - $xfer += $input->readListBegin($_etype614, $_size611); - for ($_i615 = 0; $_i615 < $_size611; ++$_i615) + $_size646 = 0; + $_etype649 = 0; + $xfer += $input->readListBegin($_etype649, $_size646); + for ($_i650 = 0; $_i650 < $_size646; ++$_i650) { - $elem616 = null; - $xfer += $input->readI64($elem616); - $this->fileIds []= $elem616; + $elem651 = null; + $xfer += $input->readI64($elem651); + $this->fileIds []= $elem651; } $xfer += $input->readListEnd(); } else { @@ -19441,14 +20209,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size617 = 0; - $_etype620 = 0; - $xfer += $input->readListBegin($_etype620, $_size617); - for ($_i621 = 0; $_i621 < $_size617; ++$_i621) + $_size652 = 0; + $_etype655 = 0; + $xfer += $input->readListBegin($_etype655, $_size652); + for ($_i656 = 0; $_i656 < $_size652; ++$_i656) { - $elem622 = null; - $xfer += $input->readString($elem622); - $this->metadata []= $elem622; + $elem657 = null; + $xfer += $input->readString($elem657); + $this->metadata []= $elem657; } $xfer += $input->readListEnd(); } else { @@ -19483,9 +20251,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter623) + foreach ($this->fileIds as $iter658) { - $xfer += $output->writeI64($iter623); + $xfer += $output->writeI64($iter658); } } $output->writeListEnd(); @@ -19500,9 +20268,9 @@ class PutFileMetadataRequest { { 
$output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter624) + foreach ($this->metadata as $iter659) { - $xfer += $output->writeString($iter624); + $xfer += $output->writeString($iter659); } } $output->writeListEnd(); @@ -19621,14 +20389,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size625 = 0; - $_etype628 = 0; - $xfer += $input->readListBegin($_etype628, $_size625); - for ($_i629 = 0; $_i629 < $_size625; ++$_i629) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem630 = null; - $xfer += $input->readI64($elem630); - $this->fileIds []= $elem630; + $elem665 = null; + $xfer += $input->readI64($elem665); + $this->fileIds []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -19656,9 +20424,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter631) + foreach ($this->fileIds as $iter666) { - $xfer += $output->writeI64($iter631); + $xfer += $output->writeI64($iter666); } } $output->writeListEnd(); @@ -19942,15 +20710,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size632 = 0; - $_etype635 = 0; - $xfer += $input->readListBegin($_etype635, $_size632); - for ($_i636 = 0; $_i636 < $_size632; ++$_i636) + $_size667 = 0; + $_etype670 = 0; + $xfer += $input->readListBegin($_etype670, $_size667); + for ($_i671 = 0; $_i671 < $_size667; ++$_i671) { - $elem637 = null; - $elem637 = new \metastore\Function(); - $xfer += $elem637->read($input); - $this->functions []= $elem637; + $elem672 = null; + $elem672 = new \metastore\Function(); + $xfer += $elem672->read($input); + $this->functions []= $elem672; } $xfer += $input->readListEnd(); } else { @@ -19978,9 +20746,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, 
count($this->functions)); { - foreach ($this->functions as $iter638) + foreach ($this->functions as $iter673) { - $xfer += $iter638->write($output); + $xfer += $iter673->write($output); } } $output->writeListEnd(); @@ -20044,14 +20812,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size639 = 0; - $_etype642 = 0; - $xfer += $input->readListBegin($_etype642, $_size639); - for ($_i643 = 0; $_i643 < $_size639; ++$_i643) + $_size674 = 0; + $_etype677 = 0; + $xfer += $input->readListBegin($_etype677, $_size674); + for ($_i678 = 0; $_i678 < $_size674; ++$_i678) { - $elem644 = null; - $xfer += $input->readI32($elem644); - $this->values []= $elem644; + $elem679 = null; + $xfer += $input->readI32($elem679); + $this->values []= $elem679; } $xfer += $input->readListEnd(); } else { @@ -20079,9 +20847,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter645) + foreach ($this->values as $iter680) { - $xfer += $output->writeI32($iter645); + $xfer += $output->writeI32($iter680); } } $output->writeListEnd(); @@ -20381,14 +21149,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size646 = 0; - $_etype649 = 0; - $xfer += $input->readListBegin($_etype649, $_size646); - for ($_i650 = 0; $_i650 < $_size646; ++$_i650) + $_size681 = 0; + $_etype684 = 0; + $xfer += $input->readListBegin($_etype684, $_size681); + for ($_i685 = 0; $_i685 < $_size681; ++$_i685) { - $elem651 = null; - $xfer += $input->readString($elem651); - $this->tblNames []= $elem651; + $elem686 = null; + $xfer += $input->readString($elem686); + $this->tblNames []= $elem686; } $xfer += $input->readListEnd(); } else { @@ -20429,9 +21197,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter652) + foreach ($this->tblNames as $iter687) { - $xfer += $output->writeString($iter652); + 
$xfer += $output->writeString($iter687); } } $output->writeListEnd(); @@ -20504,15 +21272,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size653 = 0; - $_etype656 = 0; - $xfer += $input->readListBegin($_etype656, $_size653); - for ($_i657 = 0; $_i657 < $_size653; ++$_i657) + $_size688 = 0; + $_etype691 = 0; + $xfer += $input->readListBegin($_etype691, $_size688); + for ($_i692 = 0; $_i692 < $_size688; ++$_i692) { - $elem658 = null; - $elem658 = new \metastore\Table(); - $xfer += $elem658->read($input); - $this->tables []= $elem658; + $elem693 = null; + $elem693 = new \metastore\Table(); + $xfer += $elem693->read($input); + $this->tables []= $elem693; } $xfer += $input->readListEnd(); } else { @@ -20540,9 +21308,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter659) + foreach ($this->tables as $iter694) { - $xfer += $iter659->write($output); + $xfer += $iter694->write($output); } } $output->writeListEnd(); @@ -20929,17 +21697,17 @@ class Materialization { case 2: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size660 = 0; - $_etype663 = 0; - $xfer += $input->readSetBegin($_etype663, $_size660); - for ($_i664 = 0; $_i664 < $_size660; ++$_i664) + $_size695 = 0; + $_etype698 = 0; + $xfer += $input->readSetBegin($_etype698, $_size695); + for ($_i699 = 0; $_i699 < $_size695; ++$_i699) { - $elem665 = null; - $xfer += $input->readString($elem665); - if (is_scalar($elem665)) { - $this->tablesUsed[$elem665] = true; + $elem700 = null; + $xfer += $input->readString($elem700); + if (is_scalar($elem700)) { + $this->tablesUsed[$elem700] = true; } else { - $this->tablesUsed []= $elem665; + $this->tablesUsed []= $elem700; } } $xfer += $input->readSetEnd(); @@ -20983,12 +21751,12 @@ class Materialization { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter666 => $iter667) + foreach 
($this->tablesUsed as $iter701 => $iter702) { - if (is_scalar($iter667)) { - $xfer += $output->writeString($iter666); + if (is_scalar($iter702)) { + $xfer += $output->writeString($iter701); } else { - $xfer += $output->writeString($iter667); + $xfer += $output->writeString($iter702); } } } @@ -22250,15 +23018,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size668 = 0; - $_etype671 = 0; - $xfer += $input->readListBegin($_etype671, $_size668); - for ($_i672 = 0; $_i672 < $_size668; ++$_i672) + $_size703 = 0; + $_etype706 = 0; + $xfer += $input->readListBegin($_etype706, $_size703); + for ($_i707 = 0; $_i707 < $_size703; ++$_i707) { - $elem673 = null; - $elem673 = new \metastore\WMPool(); - $xfer += $elem673->read($input); - $this->pools []= $elem673; + $elem708 = null; + $elem708 = new \metastore\WMPool(); + $xfer += $elem708->read($input); + $this->pools []= $elem708; } $xfer += $input->readListEnd(); } else { @@ -22268,15 +23036,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size674 = 0; - $_etype677 = 0; - $xfer += $input->readListBegin($_etype677, $_size674); - for ($_i678 = 0; $_i678 < $_size674; ++$_i678) + $_size709 = 0; + $_etype712 = 0; + $xfer += $input->readListBegin($_etype712, $_size709); + for ($_i713 = 0; $_i713 < $_size709; ++$_i713) { - $elem679 = null; - $elem679 = new \metastore\WMMapping(); - $xfer += $elem679->read($input); - $this->mappings []= $elem679; + $elem714 = null; + $elem714 = new \metastore\WMMapping(); + $xfer += $elem714->read($input); + $this->mappings []= $elem714; } $xfer += $input->readListEnd(); } else { @@ -22286,15 +23054,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size680 = 0; - $_etype683 = 0; - $xfer += $input->readListBegin($_etype683, $_size680); - for ($_i684 = 0; $_i684 < $_size680; ++$_i684) + $_size715 = 0; + $_etype718 = 0; + $xfer += 
$input->readListBegin($_etype718, $_size715); + for ($_i719 = 0; $_i719 < $_size715; ++$_i719) { - $elem685 = null; - $elem685 = new \metastore\WMTrigger(); - $xfer += $elem685->read($input); - $this->triggers []= $elem685; + $elem720 = null; + $elem720 = new \metastore\WMTrigger(); + $xfer += $elem720->read($input); + $this->triggers []= $elem720; } $xfer += $input->readListEnd(); } else { @@ -22304,15 +23072,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size686 = 0; - $_etype689 = 0; - $xfer += $input->readListBegin($_etype689, $_size686); - for ($_i690 = 0; $_i690 < $_size686; ++$_i690) + $_size721 = 0; + $_etype724 = 0; + $xfer += $input->readListBegin($_etype724, $_size721); + for ($_i725 = 0; $_i725 < $_size721; ++$_i725) { - $elem691 = null; - $elem691 = new \metastore\WMPoolTrigger(); - $xfer += $elem691->read($input); - $this->poolTriggers []= $elem691; + $elem726 = null; + $elem726 = new \metastore\WMPoolTrigger(); + $xfer += $elem726->read($input); + $this->poolTriggers []= $elem726; } $xfer += $input->readListEnd(); } else { @@ -22348,9 +23116,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter692) + foreach ($this->pools as $iter727) { - $xfer += $iter692->write($output); + $xfer += $iter727->write($output); } } $output->writeListEnd(); @@ -22365,9 +23133,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter693) + foreach ($this->mappings as $iter728) { - $xfer += $iter693->write($output); + $xfer += $iter728->write($output); } } $output->writeListEnd(); @@ -22382,9 +23150,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter694) + foreach ($this->triggers as $iter729) { - $xfer += $iter694->write($output); + $xfer += $iter729->write($output); } } 
$output->writeListEnd(); @@ -22399,9 +23167,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter695) + foreach ($this->poolTriggers as $iter730) { - $xfer += $iter695->write($output); + $xfer += $iter730->write($output); } } $output->writeListEnd(); @@ -22954,15 +23722,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size696 = 0; - $_etype699 = 0; - $xfer += $input->readListBegin($_etype699, $_size696); - for ($_i700 = 0; $_i700 < $_size696; ++$_i700) + $_size731 = 0; + $_etype734 = 0; + $xfer += $input->readListBegin($_etype734, $_size731); + for ($_i735 = 0; $_i735 < $_size731; ++$_i735) { - $elem701 = null; - $elem701 = new \metastore\WMResourcePlan(); - $xfer += $elem701->read($input); - $this->resourcePlans []= $elem701; + $elem736 = null; + $elem736 = new \metastore\WMResourcePlan(); + $xfer += $elem736->read($input); + $this->resourcePlans []= $elem736; } $xfer += $input->readListEnd(); } else { @@ -22990,9 +23758,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter702) + foreach ($this->resourcePlans as $iter737) { - $xfer += $iter702->write($output); + $xfer += $iter737->write($output); } } $output->writeListEnd(); @@ -23398,14 +24166,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size703 = 0; - $_etype706 = 0; - $xfer += $input->readListBegin($_etype706, $_size703); - for ($_i707 = 0; $_i707 < $_size703; ++$_i707) + $_size738 = 0; + $_etype741 = 0; + $xfer += $input->readListBegin($_etype741, $_size738); + for ($_i742 = 0; $_i742 < $_size738; ++$_i742) { - $elem708 = null; - $xfer += $input->readString($elem708); - $this->errors []= $elem708; + $elem743 = null; + $xfer += $input->readString($elem743); + $this->errors []= $elem743; } $xfer 
+= $input->readListEnd(); } else { @@ -23415,14 +24183,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size709 = 0; - $_etype712 = 0; - $xfer += $input->readListBegin($_etype712, $_size709); - for ($_i713 = 0; $_i713 < $_size709; ++$_i713) + $_size744 = 0; + $_etype747 = 0; + $xfer += $input->readListBegin($_etype747, $_size744); + for ($_i748 = 0; $_i748 < $_size744; ++$_i748) { - $elem714 = null; - $xfer += $input->readString($elem714); - $this->warnings []= $elem714; + $elem749 = null; + $xfer += $input->readString($elem749); + $this->warnings []= $elem749; } $xfer += $input->readListEnd(); } else { @@ -23450,9 +24218,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter715) + foreach ($this->errors as $iter750) { - $xfer += $output->writeString($iter715); + $xfer += $output->writeString($iter750); } } $output->writeListEnd(); @@ -23467,9 +24235,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter716) + foreach ($this->warnings as $iter751) { - $xfer += $output->writeString($iter716); + $xfer += $output->writeString($iter751); } } $output->writeListEnd(); @@ -24142,15 +24910,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size717 = 0; - $_etype720 = 0; - $xfer += $input->readListBegin($_etype720, $_size717); - for ($_i721 = 0; $_i721 < $_size717; ++$_i721) + $_size752 = 0; + $_etype755 = 0; + $xfer += $input->readListBegin($_etype755, $_size752); + for ($_i756 = 0; $_i756 < $_size752; ++$_i756) { - $elem722 = null; - $elem722 = new \metastore\WMTrigger(); - $xfer += $elem722->read($input); - $this->triggers []= $elem722; + $elem757 = null; + $elem757 = new \metastore\WMTrigger(); + $xfer += $elem757->read($input); + $this->triggers []= $elem757; } 
$xfer += $input->readListEnd(); } else { @@ -24178,9 +24946,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter723) + foreach ($this->triggers as $iter758) { - $xfer += $iter723->write($output); + $xfer += $iter758->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 9b2aaff..d679c50 100755 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -166,6 +166,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void abort_txn(AbortTxnRequest rqst)') print(' void abort_txns(AbortTxnsRequest rqst)') print(' void commit_txn(CommitTxnRequest rqst)') + print(' GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst)') + print(' AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst)') print(' LockResponse lock(LockRequest rqst)') print(' LockResponse check_lock(CheckLockRequest rqst)') print(' void unlock(UnlockRequest rqst)') @@ -1126,6 +1128,18 @@ elif cmd == 'commit_txn': sys.exit(1) pp.pprint(client.commit_txn(eval(args[0]),)) +elif cmd == 'get_open_write_ids': + if len(args) != 1: + print('get_open_write_ids requires 1 args') + sys.exit(1) + pp.pprint(client.get_open_write_ids(eval(args[0]),)) + +elif cmd == 'allocate_table_write_id': + if len(args) != 1: + print('allocate_table_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.allocate_table_write_id(eval(args[0]),)) + elif cmd == 'lock': if len(args) != 1: print('lock requires 1 args') diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py 
b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 2e19105..689364d 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1166,6 +1166,20 @@ def commit_txn(self, rqst): """ pass + def get_open_write_ids(self, rqst): + """ + Parameters: + - rqst + """ + pass + + def allocate_table_write_id(self, rqst): + """ + Parameters: + - rqst + """ + pass + def lock(self, rqst): """ Parameters: @@ -6648,6 +6662,78 @@ def recv_commit_txn(self): raise result.o2 return + def get_open_write_ids(self, rqst): + """ + Parameters: + - rqst + """ + self.send_get_open_write_ids(rqst) + return self.recv_get_open_write_ids() + + def send_get_open_write_ids(self, rqst): + self._oprot.writeMessageBegin('get_open_write_ids', TMessageType.CALL, self._seqid) + args = get_open_write_ids_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_open_write_ids(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_open_write_ids_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_open_write_ids failed: unknown result") + + def allocate_table_write_id(self, rqst): + """ + Parameters: + - rqst + """ + self.send_allocate_table_write_id(rqst) + return self.recv_allocate_table_write_id() + + def send_allocate_table_write_id(self, rqst): + self._oprot.writeMessageBegin('allocate_table_write_id', TMessageType.CALL, self._seqid) + args = allocate_table_write_id_args() + args.rqst = rqst + 
args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_allocate_table_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = allocate_table_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "allocate_table_write_id failed: unknown result") + def lock(self, rqst): """ Parameters: @@ -8104,6 +8190,8 @@ def __init__(self, handler): self._processMap["abort_txn"] = Processor.process_abort_txn self._processMap["abort_txns"] = Processor.process_abort_txns self._processMap["commit_txn"] = Processor.process_commit_txn + self._processMap["get_open_write_ids"] = Processor.process_get_open_write_ids + self._processMap["allocate_table_write_id"] = Processor.process_allocate_table_write_id self._processMap["lock"] = Processor.process_lock self._processMap["check_lock"] = Processor.process_check_lock self._processMap["unlock"] = Processor.process_unlock @@ -11694,6 +11782,59 @@ def process_commit_txn(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_open_write_ids(self, seqid, iprot, oprot): + args = get_open_write_ids_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_open_write_ids_result() + try: + result.success = self._handler.get_open_write_ids(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception 
as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_open_write_ids", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_allocate_table_write_id(self, seqid, iprot, oprot): + args = allocate_table_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = allocate_table_write_id_result() + try: + result.success = self._handler.allocate_table_write_id(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("allocate_table_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_lock(self, seqid, iprot, oprot): args = lock_args() args.read(iprot) @@ -13510,10 +13651,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype723, _size720) = iprot.readListBegin() - for _i724 in xrange(_size720): - _elem725 = iprot.readString() - self.success.append(_elem725) + (_etype758, _size755) = iprot.readListBegin() + for _i759 in xrange(_size755): + _elem760 = iprot.readString() + self.success.append(_elem760) iprot.readListEnd() else: iprot.skip(ftype) @@ -13536,8 +13677,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter726 in self.success: - oprot.writeString(iter726) 
+ for iter761 in self.success: + oprot.writeString(iter761) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13642,10 +13783,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype730, _size727) = iprot.readListBegin() - for _i731 in xrange(_size727): - _elem732 = iprot.readString() - self.success.append(_elem732) + (_etype765, _size762) = iprot.readListBegin() + for _i766 in xrange(_size762): + _elem767 = iprot.readString() + self.success.append(_elem767) iprot.readListEnd() else: iprot.skip(ftype) @@ -13668,8 +13809,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter733 in self.success: - oprot.writeString(iter733) + for iter768 in self.success: + oprot.writeString(iter768) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14439,12 +14580,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype735, _vtype736, _size734 ) = iprot.readMapBegin() - for _i738 in xrange(_size734): - _key739 = iprot.readString() - _val740 = Type() - _val740.read(iprot) - self.success[_key739] = _val740 + (_ktype770, _vtype771, _size769 ) = iprot.readMapBegin() + for _i773 in xrange(_size769): + _key774 = iprot.readString() + _val775 = Type() + _val775.read(iprot) + self.success[_key774] = _val775 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14467,9 +14608,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter741,viter742 in self.success.items(): - oprot.writeString(kiter741) - viter742.write(oprot) + for kiter776,viter777 in self.success.items(): + oprot.writeString(kiter776) + viter777.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -14612,11 +14753,11 @@ def read(self, iprot): if fid == 0: 
if ftype == TType.LIST: self.success = [] - (_etype746, _size743) = iprot.readListBegin() - for _i747 in xrange(_size743): - _elem748 = FieldSchema() - _elem748.read(iprot) - self.success.append(_elem748) + (_etype781, _size778) = iprot.readListBegin() + for _i782 in xrange(_size778): + _elem783 = FieldSchema() + _elem783.read(iprot) + self.success.append(_elem783) iprot.readListEnd() else: iprot.skip(ftype) @@ -14651,8 +14792,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter749 in self.success: - iter749.write(oprot) + for iter784 in self.success: + iter784.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14819,11 +14960,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype753, _size750) = iprot.readListBegin() - for _i754 in xrange(_size750): - _elem755 = FieldSchema() - _elem755.read(iprot) - self.success.append(_elem755) + (_etype788, _size785) = iprot.readListBegin() + for _i789 in xrange(_size785): + _elem790 = FieldSchema() + _elem790.read(iprot) + self.success.append(_elem790) iprot.readListEnd() else: iprot.skip(ftype) @@ -14858,8 +14999,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter756 in self.success: - iter756.write(oprot) + for iter791 in self.success: + iter791.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15012,11 +15153,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype760, _size757) = iprot.readListBegin() - for _i761 in xrange(_size757): - _elem762 = FieldSchema() - _elem762.read(iprot) - self.success.append(_elem762) + (_etype795, _size792) = iprot.readListBegin() + for _i796 in xrange(_size792): + _elem797 = FieldSchema() + _elem797.read(iprot) + 
self.success.append(_elem797) iprot.readListEnd() else: iprot.skip(ftype) @@ -15051,8 +15192,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter763 in self.success: - iter763.write(oprot) + for iter798 in self.success: + iter798.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15219,11 +15360,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype767, _size764) = iprot.readListBegin() - for _i768 in xrange(_size764): - _elem769 = FieldSchema() - _elem769.read(iprot) - self.success.append(_elem769) + (_etype802, _size799) = iprot.readListBegin() + for _i803 in xrange(_size799): + _elem804 = FieldSchema() + _elem804.read(iprot) + self.success.append(_elem804) iprot.readListEnd() else: iprot.skip(ftype) @@ -15258,8 +15399,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter770 in self.success: - iter770.write(oprot) + for iter805 in self.success: + iter805.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15706,44 +15847,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype774, _size771) = iprot.readListBegin() - for _i775 in xrange(_size771): - _elem776 = SQLPrimaryKey() - _elem776.read(iprot) - self.primaryKeys.append(_elem776) + (_etype809, _size806) = iprot.readListBegin() + for _i810 in xrange(_size806): + _elem811 = SQLPrimaryKey() + _elem811.read(iprot) + self.primaryKeys.append(_elem811) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype780, _size777) = iprot.readListBegin() - for _i781 in xrange(_size777): - _elem782 = SQLForeignKey() - _elem782.read(iprot) - self.foreignKeys.append(_elem782) + (_etype815, 
_size812) = iprot.readListBegin() + for _i816 in xrange(_size812): + _elem817 = SQLForeignKey() + _elem817.read(iprot) + self.foreignKeys.append(_elem817) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype786, _size783) = iprot.readListBegin() - for _i787 in xrange(_size783): - _elem788 = SQLUniqueConstraint() - _elem788.read(iprot) - self.uniqueConstraints.append(_elem788) + (_etype821, _size818) = iprot.readListBegin() + for _i822 in xrange(_size818): + _elem823 = SQLUniqueConstraint() + _elem823.read(iprot) + self.uniqueConstraints.append(_elem823) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype792, _size789) = iprot.readListBegin() - for _i793 in xrange(_size789): - _elem794 = SQLNotNullConstraint() - _elem794.read(iprot) - self.notNullConstraints.append(_elem794) + (_etype827, _size824) = iprot.readListBegin() + for _i828 in xrange(_size824): + _elem829 = SQLNotNullConstraint() + _elem829.read(iprot) + self.notNullConstraints.append(_elem829) iprot.readListEnd() else: iprot.skip(ftype) @@ -15764,29 +15905,29 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter795 in self.primaryKeys: - iter795.write(oprot) + for iter830 in self.primaryKeys: + iter830.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter796 in self.foreignKeys: - iter796.write(oprot) + for iter831 in self.foreignKeys: + iter831.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter797 in 
self.uniqueConstraints: - iter797.write(oprot) + for iter832 in self.uniqueConstraints: + iter832.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter798 in self.notNullConstraints: - iter798.write(oprot) + for iter833 in self.notNullConstraints: + iter833.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17052,10 +17193,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype802, _size799) = iprot.readListBegin() - for _i803 in xrange(_size799): - _elem804 = iprot.readString() - self.partNames.append(_elem804) + (_etype837, _size834) = iprot.readListBegin() + for _i838 in xrange(_size834): + _elem839 = iprot.readString() + self.partNames.append(_elem839) iprot.readListEnd() else: iprot.skip(ftype) @@ -17080,8 +17221,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter805 in self.partNames: - oprot.writeString(iter805) + for iter840 in self.partNames: + oprot.writeString(iter840) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17281,10 +17422,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype809, _size806) = iprot.readListBegin() - for _i810 in xrange(_size806): - _elem811 = iprot.readString() - self.success.append(_elem811) + (_etype844, _size841) = iprot.readListBegin() + for _i845 in xrange(_size841): + _elem846 = iprot.readString() + self.success.append(_elem846) iprot.readListEnd() else: iprot.skip(ftype) @@ -17307,8 +17448,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter812 in self.success: - 
oprot.writeString(iter812) + for iter847 in self.success: + oprot.writeString(iter847) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17458,10 +17599,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype816, _size813) = iprot.readListBegin() - for _i817 in xrange(_size813): - _elem818 = iprot.readString() - self.success.append(_elem818) + (_etype851, _size848) = iprot.readListBegin() + for _i852 in xrange(_size848): + _elem853 = iprot.readString() + self.success.append(_elem853) iprot.readListEnd() else: iprot.skip(ftype) @@ -17484,8 +17625,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter819 in self.success: - oprot.writeString(iter819) + for iter854 in self.success: + oprot.writeString(iter854) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17609,10 +17750,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype823, _size820) = iprot.readListBegin() - for _i824 in xrange(_size820): - _elem825 = iprot.readString() - self.success.append(_elem825) + (_etype858, _size855) = iprot.readListBegin() + for _i859 in xrange(_size855): + _elem860 = iprot.readString() + self.success.append(_elem860) iprot.readListEnd() else: iprot.skip(ftype) @@ -17635,8 +17776,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter826 in self.success: - oprot.writeString(iter826) + for iter861 in self.success: + oprot.writeString(iter861) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17709,10 +17850,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype830, _size827) = iprot.readListBegin() - for _i831 in xrange(_size827): - _elem832 = iprot.readString() - 
self.tbl_types.append(_elem832) + (_etype865, _size862) = iprot.readListBegin() + for _i866 in xrange(_size862): + _elem867 = iprot.readString() + self.tbl_types.append(_elem867) iprot.readListEnd() else: iprot.skip(ftype) @@ -17737,8 +17878,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter833 in self.tbl_types: - oprot.writeString(iter833) + for iter868 in self.tbl_types: + oprot.writeString(iter868) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17794,11 +17935,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype837, _size834) = iprot.readListBegin() - for _i838 in xrange(_size834): - _elem839 = TableMeta() - _elem839.read(iprot) - self.success.append(_elem839) + (_etype872, _size869) = iprot.readListBegin() + for _i873 in xrange(_size869): + _elem874 = TableMeta() + _elem874.read(iprot) + self.success.append(_elem874) iprot.readListEnd() else: iprot.skip(ftype) @@ -17821,8 +17962,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter840 in self.success: - iter840.write(oprot) + for iter875 in self.success: + iter875.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17946,10 +18087,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype844, _size841) = iprot.readListBegin() - for _i845 in xrange(_size841): - _elem846 = iprot.readString() - self.success.append(_elem846) + (_etype879, _size876) = iprot.readListBegin() + for _i880 in xrange(_size876): + _elem881 = iprot.readString() + self.success.append(_elem881) iprot.readListEnd() else: iprot.skip(ftype) @@ -17972,8 +18113,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRING, len(self.success)) - for iter847 in self.success: - oprot.writeString(iter847) + for iter882 in self.success: + oprot.writeString(iter882) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18209,10 +18350,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype851, _size848) = iprot.readListBegin() - for _i852 in xrange(_size848): - _elem853 = iprot.readString() - self.tbl_names.append(_elem853) + (_etype886, _size883) = iprot.readListBegin() + for _i887 in xrange(_size883): + _elem888 = iprot.readString() + self.tbl_names.append(_elem888) iprot.readListEnd() else: iprot.skip(ftype) @@ -18233,8 +18374,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter854 in self.tbl_names: - oprot.writeString(iter854) + for iter889 in self.tbl_names: + oprot.writeString(iter889) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18286,11 +18427,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype858, _size855) = iprot.readListBegin() - for _i859 in xrange(_size855): - _elem860 = Table() - _elem860.read(iprot) - self.success.append(_elem860) + (_etype893, _size890) = iprot.readListBegin() + for _i894 in xrange(_size890): + _elem895 = Table() + _elem895.read(iprot) + self.success.append(_elem895) iprot.readListEnd() else: iprot.skip(ftype) @@ -18307,8 +18448,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter861 in self.success: - iter861.write(oprot) + for iter896 in self.success: + iter896.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18700,10 +18841,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype865, 
_size862) = iprot.readListBegin() - for _i866 in xrange(_size862): - _elem867 = iprot.readString() - self.tbl_names.append(_elem867) + (_etype900, _size897) = iprot.readListBegin() + for _i901 in xrange(_size897): + _elem902 = iprot.readString() + self.tbl_names.append(_elem902) iprot.readListEnd() else: iprot.skip(ftype) @@ -18724,8 +18865,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter868 in self.tbl_names: - oprot.writeString(iter868) + for iter903 in self.tbl_names: + oprot.writeString(iter903) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18786,12 +18927,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype870, _vtype871, _size869 ) = iprot.readMapBegin() - for _i873 in xrange(_size869): - _key874 = iprot.readString() - _val875 = Materialization() - _val875.read(iprot) - self.success[_key874] = _val875 + (_ktype905, _vtype906, _size904 ) = iprot.readMapBegin() + for _i908 in xrange(_size904): + _key909 = iprot.readString() + _val910 = Materialization() + _val910.read(iprot) + self.success[_key909] = _val910 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18826,9 +18967,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter876,viter877 in self.success.items(): - oprot.writeString(kiter876) - viter877.write(oprot) + for kiter911,viter912 in self.success.items(): + oprot.writeString(kiter911) + viter912.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18994,10 +19135,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype881, _size878) = iprot.readListBegin() - for _i882 in xrange(_size878): - _elem883 = iprot.readString() - self.success.append(_elem883) + (_etype916, _size913) 
= iprot.readListBegin() + for _i917 in xrange(_size913): + _elem918 = iprot.readString() + self.success.append(_elem918) iprot.readListEnd() else: iprot.skip(ftype) @@ -19032,8 +19173,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter884 in self.success: - oprot.writeString(iter884) + for iter919 in self.success: + oprot.writeString(iter919) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20003,11 +20144,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype888, _size885) = iprot.readListBegin() - for _i889 in xrange(_size885): - _elem890 = Partition() - _elem890.read(iprot) - self.new_parts.append(_elem890) + (_etype923, _size920) = iprot.readListBegin() + for _i924 in xrange(_size920): + _elem925 = Partition() + _elem925.read(iprot) + self.new_parts.append(_elem925) iprot.readListEnd() else: iprot.skip(ftype) @@ -20024,8 +20165,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter891 in self.new_parts: - iter891.write(oprot) + for iter926 in self.new_parts: + iter926.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20183,11 +20324,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype895, _size892) = iprot.readListBegin() - for _i896 in xrange(_size892): - _elem897 = PartitionSpec() - _elem897.read(iprot) - self.new_parts.append(_elem897) + (_etype930, _size927) = iprot.readListBegin() + for _i931 in xrange(_size927): + _elem932 = PartitionSpec() + _elem932.read(iprot) + self.new_parts.append(_elem932) iprot.readListEnd() else: iprot.skip(ftype) @@ -20204,8 +20345,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) 
oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter898 in self.new_parts: - iter898.write(oprot) + for iter933 in self.new_parts: + iter933.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20379,10 +20520,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype902, _size899) = iprot.readListBegin() - for _i903 in xrange(_size899): - _elem904 = iprot.readString() - self.part_vals.append(_elem904) + (_etype937, _size934) = iprot.readListBegin() + for _i938 in xrange(_size934): + _elem939 = iprot.readString() + self.part_vals.append(_elem939) iprot.readListEnd() else: iprot.skip(ftype) @@ -20407,8 +20548,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter905 in self.part_vals: - oprot.writeString(iter905) + for iter940 in self.part_vals: + oprot.writeString(iter940) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20761,10 +20902,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype909, _size906) = iprot.readListBegin() - for _i910 in xrange(_size906): - _elem911 = iprot.readString() - self.part_vals.append(_elem911) + (_etype944, _size941) = iprot.readListBegin() + for _i945 in xrange(_size941): + _elem946 = iprot.readString() + self.part_vals.append(_elem946) iprot.readListEnd() else: iprot.skip(ftype) @@ -20795,8 +20936,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter912 in self.part_vals: - oprot.writeString(iter912) + for iter947 in self.part_vals: + oprot.writeString(iter947) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -21391,10 +21532,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = 
[] - (_etype916, _size913) = iprot.readListBegin() - for _i917 in xrange(_size913): - _elem918 = iprot.readString() - self.part_vals.append(_elem918) + (_etype951, _size948) = iprot.readListBegin() + for _i952 in xrange(_size948): + _elem953 = iprot.readString() + self.part_vals.append(_elem953) iprot.readListEnd() else: iprot.skip(ftype) @@ -21424,8 +21565,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter919 in self.part_vals: - oprot.writeString(iter919) + for iter954 in self.part_vals: + oprot.writeString(iter954) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -21598,10 +21739,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype923, _size920) = iprot.readListBegin() - for _i924 in xrange(_size920): - _elem925 = iprot.readString() - self.part_vals.append(_elem925) + (_etype958, _size955) = iprot.readListBegin() + for _i959 in xrange(_size955): + _elem960 = iprot.readString() + self.part_vals.append(_elem960) iprot.readListEnd() else: iprot.skip(ftype) @@ -21637,8 +21778,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter926 in self.part_vals: - oprot.writeString(iter926) + for iter961 in self.part_vals: + oprot.writeString(iter961) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -22375,10 +22516,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype930, _size927) = iprot.readListBegin() - for _i931 in xrange(_size927): - _elem932 = iprot.readString() - self.part_vals.append(_elem932) + (_etype965, _size962) = iprot.readListBegin() + for _i966 in xrange(_size962): + _elem967 = iprot.readString() + self.part_vals.append(_elem967) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -22403,8 +22544,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter933 in self.part_vals: - oprot.writeString(iter933) + for iter968 in self.part_vals: + oprot.writeString(iter968) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22563,11 +22704,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype935, _vtype936, _size934 ) = iprot.readMapBegin() - for _i938 in xrange(_size934): - _key939 = iprot.readString() - _val940 = iprot.readString() - self.partitionSpecs[_key939] = _val940 + (_ktype970, _vtype971, _size969 ) = iprot.readMapBegin() + for _i973 in xrange(_size969): + _key974 = iprot.readString() + _val975 = iprot.readString() + self.partitionSpecs[_key974] = _val975 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22604,9 +22745,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter941,viter942 in self.partitionSpecs.items(): - oprot.writeString(kiter941) - oprot.writeString(viter942) + for kiter976,viter977 in self.partitionSpecs.items(): + oprot.writeString(kiter976) + oprot.writeString(viter977) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -22811,11 +22952,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype944, _vtype945, _size943 ) = iprot.readMapBegin() - for _i947 in xrange(_size943): - _key948 = iprot.readString() - _val949 = iprot.readString() - self.partitionSpecs[_key948] = _val949 + (_ktype979, _vtype980, _size978 ) = iprot.readMapBegin() + for _i982 in xrange(_size978): + _key983 = iprot.readString() + _val984 = iprot.readString() + self.partitionSpecs[_key983] = _val984 iprot.readMapEnd() else: iprot.skip(ftype) @@ 
-22852,9 +22993,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter950,viter951 in self.partitionSpecs.items(): - oprot.writeString(kiter950) - oprot.writeString(viter951) + for kiter985,viter986 in self.partitionSpecs.items(): + oprot.writeString(kiter985) + oprot.writeString(viter986) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -22937,11 +23078,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype955, _size952) = iprot.readListBegin() - for _i956 in xrange(_size952): - _elem957 = Partition() - _elem957.read(iprot) - self.success.append(_elem957) + (_etype990, _size987) = iprot.readListBegin() + for _i991 in xrange(_size987): + _elem992 = Partition() + _elem992.read(iprot) + self.success.append(_elem992) iprot.readListEnd() else: iprot.skip(ftype) @@ -22982,8 +23123,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter958 in self.success: - iter958.write(oprot) + for iter993 in self.success: + iter993.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23077,10 +23218,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype962, _size959) = iprot.readListBegin() - for _i963 in xrange(_size959): - _elem964 = iprot.readString() - self.part_vals.append(_elem964) + (_etype997, _size994) = iprot.readListBegin() + for _i998 in xrange(_size994): + _elem999 = iprot.readString() + self.part_vals.append(_elem999) iprot.readListEnd() else: iprot.skip(ftype) @@ -23092,10 +23233,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype968, _size965) = iprot.readListBegin() - for _i969 in xrange(_size965): - _elem970 = 
iprot.readString() - self.group_names.append(_elem970) + (_etype1003, _size1000) = iprot.readListBegin() + for _i1004 in xrange(_size1000): + _elem1005 = iprot.readString() + self.group_names.append(_elem1005) iprot.readListEnd() else: iprot.skip(ftype) @@ -23120,8 +23261,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter971 in self.part_vals: - oprot.writeString(iter971) + for iter1006 in self.part_vals: + oprot.writeString(iter1006) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -23131,8 +23272,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter972 in self.group_names: - oprot.writeString(iter972) + for iter1007 in self.group_names: + oprot.writeString(iter1007) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23561,11 +23702,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype976, _size973) = iprot.readListBegin() - for _i977 in xrange(_size973): - _elem978 = Partition() - _elem978.read(iprot) - self.success.append(_elem978) + (_etype1011, _size1008) = iprot.readListBegin() + for _i1012 in xrange(_size1008): + _elem1013 = Partition() + _elem1013.read(iprot) + self.success.append(_elem1013) iprot.readListEnd() else: iprot.skip(ftype) @@ -23594,8 +23735,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter979 in self.success: - iter979.write(oprot) + for iter1014 in self.success: + iter1014.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23689,10 +23830,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype983, _size980) = 
iprot.readListBegin() - for _i984 in xrange(_size980): - _elem985 = iprot.readString() - self.group_names.append(_elem985) + (_etype1018, _size1015) = iprot.readListBegin() + for _i1019 in xrange(_size1015): + _elem1020 = iprot.readString() + self.group_names.append(_elem1020) iprot.readListEnd() else: iprot.skip(ftype) @@ -23725,8 +23866,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter986 in self.group_names: - oprot.writeString(iter986) + for iter1021 in self.group_names: + oprot.writeString(iter1021) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23787,11 +23928,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype990, _size987) = iprot.readListBegin() - for _i991 in xrange(_size987): - _elem992 = Partition() - _elem992.read(iprot) - self.success.append(_elem992) + (_etype1025, _size1022) = iprot.readListBegin() + for _i1026 in xrange(_size1022): + _elem1027 = Partition() + _elem1027.read(iprot) + self.success.append(_elem1027) iprot.readListEnd() else: iprot.skip(ftype) @@ -23820,8 +23961,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter993 in self.success: - iter993.write(oprot) + for iter1028 in self.success: + iter1028.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23979,11 +24120,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype997, _size994) = iprot.readListBegin() - for _i998 in xrange(_size994): - _elem999 = PartitionSpec() - _elem999.read(iprot) - self.success.append(_elem999) + (_etype1032, _size1029) = iprot.readListBegin() + for _i1033 in xrange(_size1029): + _elem1034 = PartitionSpec() + _elem1034.read(iprot) + self.success.append(_elem1034) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -24012,8 +24153,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1000 in self.success: - iter1000.write(oprot) + for iter1035 in self.success: + iter1035.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24171,10 +24312,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1004, _size1001) = iprot.readListBegin() - for _i1005 in xrange(_size1001): - _elem1006 = iprot.readString() - self.success.append(_elem1006) + (_etype1039, _size1036) = iprot.readListBegin() + for _i1040 in xrange(_size1036): + _elem1041 = iprot.readString() + self.success.append(_elem1041) iprot.readListEnd() else: iprot.skip(ftype) @@ -24203,8 +24344,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1007 in self.success: - oprot.writeString(iter1007) + for iter1042 in self.success: + oprot.writeString(iter1042) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24444,10 +24585,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1011, _size1008) = iprot.readListBegin() - for _i1012 in xrange(_size1008): - _elem1013 = iprot.readString() - self.part_vals.append(_elem1013) + (_etype1046, _size1043) = iprot.readListBegin() + for _i1047 in xrange(_size1043): + _elem1048 = iprot.readString() + self.part_vals.append(_elem1048) iprot.readListEnd() else: iprot.skip(ftype) @@ -24477,8 +24618,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1014 in self.part_vals: - oprot.writeString(iter1014) + for iter1049 in self.part_vals: + oprot.writeString(iter1049) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -24542,11 +24683,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1018, _size1015) = iprot.readListBegin() - for _i1019 in xrange(_size1015): - _elem1020 = Partition() - _elem1020.read(iprot) - self.success.append(_elem1020) + (_etype1053, _size1050) = iprot.readListBegin() + for _i1054 in xrange(_size1050): + _elem1055 = Partition() + _elem1055.read(iprot) + self.success.append(_elem1055) iprot.readListEnd() else: iprot.skip(ftype) @@ -24575,8 +24716,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1021 in self.success: - iter1021.write(oprot) + for iter1056 in self.success: + iter1056.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24663,10 +24804,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1025, _size1022) = iprot.readListBegin() - for _i1026 in xrange(_size1022): - _elem1027 = iprot.readString() - self.part_vals.append(_elem1027) + (_etype1060, _size1057) = iprot.readListBegin() + for _i1061 in xrange(_size1057): + _elem1062 = iprot.readString() + self.part_vals.append(_elem1062) iprot.readListEnd() else: iprot.skip(ftype) @@ -24683,10 +24824,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1031, _size1028) = iprot.readListBegin() - for _i1032 in xrange(_size1028): - _elem1033 = iprot.readString() - self.group_names.append(_elem1033) + (_etype1066, _size1063) = iprot.readListBegin() + for _i1067 in xrange(_size1063): + _elem1068 = iprot.readString() + self.group_names.append(_elem1068) iprot.readListEnd() else: iprot.skip(ftype) @@ -24711,8 +24852,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) 
oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1034 in self.part_vals: - oprot.writeString(iter1034) + for iter1069 in self.part_vals: + oprot.writeString(iter1069) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -24726,8 +24867,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1035 in self.group_names: - oprot.writeString(iter1035) + for iter1070 in self.group_names: + oprot.writeString(iter1070) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24789,11 +24930,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1039, _size1036) = iprot.readListBegin() - for _i1040 in xrange(_size1036): - _elem1041 = Partition() - _elem1041.read(iprot) - self.success.append(_elem1041) + (_etype1074, _size1071) = iprot.readListBegin() + for _i1075 in xrange(_size1071): + _elem1076 = Partition() + _elem1076.read(iprot) + self.success.append(_elem1076) iprot.readListEnd() else: iprot.skip(ftype) @@ -24822,8 +24963,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1042 in self.success: - iter1042.write(oprot) + for iter1077 in self.success: + iter1077.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24904,10 +25045,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1046, _size1043) = iprot.readListBegin() - for _i1047 in xrange(_size1043): - _elem1048 = iprot.readString() - self.part_vals.append(_elem1048) + (_etype1081, _size1078) = iprot.readListBegin() + for _i1082 in xrange(_size1078): + _elem1083 = iprot.readString() + self.part_vals.append(_elem1083) iprot.readListEnd() else: iprot.skip(ftype) @@ -24937,8 +25078,8 @@ def write(self, oprot): if 
self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1049 in self.part_vals: - oprot.writeString(iter1049) + for iter1084 in self.part_vals: + oprot.writeString(iter1084) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25002,10 +25143,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1053, _size1050) = iprot.readListBegin() - for _i1054 in xrange(_size1050): - _elem1055 = iprot.readString() - self.success.append(_elem1055) + (_etype1088, _size1085) = iprot.readListBegin() + for _i1089 in xrange(_size1085): + _elem1090 = iprot.readString() + self.success.append(_elem1090) iprot.readListEnd() else: iprot.skip(ftype) @@ -25034,8 +25175,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1056 in self.success: - oprot.writeString(iter1056) + for iter1091 in self.success: + oprot.writeString(iter1091) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25206,11 +25347,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1060, _size1057) = iprot.readListBegin() - for _i1061 in xrange(_size1057): - _elem1062 = Partition() - _elem1062.read(iprot) - self.success.append(_elem1062) + (_etype1095, _size1092) = iprot.readListBegin() + for _i1096 in xrange(_size1092): + _elem1097 = Partition() + _elem1097.read(iprot) + self.success.append(_elem1097) iprot.readListEnd() else: iprot.skip(ftype) @@ -25239,8 +25380,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1063 in self.success: - iter1063.write(oprot) + for iter1098 in self.success: + iter1098.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: 
@@ -25411,11 +25552,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1067, _size1064) = iprot.readListBegin() - for _i1068 in xrange(_size1064): - _elem1069 = PartitionSpec() - _elem1069.read(iprot) - self.success.append(_elem1069) + (_etype1102, _size1099) = iprot.readListBegin() + for _i1103 in xrange(_size1099): + _elem1104 = PartitionSpec() + _elem1104.read(iprot) + self.success.append(_elem1104) iprot.readListEnd() else: iprot.skip(ftype) @@ -25444,8 +25585,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1070 in self.success: - iter1070.write(oprot) + for iter1105 in self.success: + iter1105.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25865,10 +26006,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1074, _size1071) = iprot.readListBegin() - for _i1075 in xrange(_size1071): - _elem1076 = iprot.readString() - self.names.append(_elem1076) + (_etype1109, _size1106) = iprot.readListBegin() + for _i1110 in xrange(_size1106): + _elem1111 = iprot.readString() + self.names.append(_elem1111) iprot.readListEnd() else: iprot.skip(ftype) @@ -25893,8 +26034,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1077 in self.names: - oprot.writeString(iter1077) + for iter1112 in self.names: + oprot.writeString(iter1112) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25953,11 +26094,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1081, _size1078) = iprot.readListBegin() - for _i1082 in xrange(_size1078): - _elem1083 = Partition() - _elem1083.read(iprot) - self.success.append(_elem1083) + (_etype1116, _size1113) = iprot.readListBegin() + for _i1117 in 
xrange(_size1113): + _elem1118 = Partition() + _elem1118.read(iprot) + self.success.append(_elem1118) iprot.readListEnd() else: iprot.skip(ftype) @@ -25986,8 +26127,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1084 in self.success: - iter1084.write(oprot) + for iter1119 in self.success: + iter1119.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26237,11 +26378,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1088, _size1085) = iprot.readListBegin() - for _i1089 in xrange(_size1085): - _elem1090 = Partition() - _elem1090.read(iprot) - self.new_parts.append(_elem1090) + (_etype1123, _size1120) = iprot.readListBegin() + for _i1124 in xrange(_size1120): + _elem1125 = Partition() + _elem1125.read(iprot) + self.new_parts.append(_elem1125) iprot.readListEnd() else: iprot.skip(ftype) @@ -26266,8 +26407,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1091 in self.new_parts: - iter1091.write(oprot) + for iter1126 in self.new_parts: + iter1126.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26420,11 +26561,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1095, _size1092) = iprot.readListBegin() - for _i1096 in xrange(_size1092): - _elem1097 = Partition() - _elem1097.read(iprot) - self.new_parts.append(_elem1097) + (_etype1130, _size1127) = iprot.readListBegin() + for _i1131 in xrange(_size1127): + _elem1132 = Partition() + _elem1132.read(iprot) + self.new_parts.append(_elem1132) iprot.readListEnd() else: iprot.skip(ftype) @@ -26455,8 +26596,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) 
oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1098 in self.new_parts: - iter1098.write(oprot) + for iter1133 in self.new_parts: + iter1133.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -26800,10 +26941,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1102, _size1099) = iprot.readListBegin() - for _i1103 in xrange(_size1099): - _elem1104 = iprot.readString() - self.part_vals.append(_elem1104) + (_etype1137, _size1134) = iprot.readListBegin() + for _i1138 in xrange(_size1134): + _elem1139 = iprot.readString() + self.part_vals.append(_elem1139) iprot.readListEnd() else: iprot.skip(ftype) @@ -26834,8 +26975,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1105 in self.part_vals: - oprot.writeString(iter1105) + for iter1140 in self.part_vals: + oprot.writeString(iter1140) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -26977,10 +27118,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1109, _size1106) = iprot.readListBegin() - for _i1110 in xrange(_size1106): - _elem1111 = iprot.readString() - self.part_vals.append(_elem1111) + (_etype1144, _size1141) = iprot.readListBegin() + for _i1145 in xrange(_size1141): + _elem1146 = iprot.readString() + self.part_vals.append(_elem1146) iprot.readListEnd() else: iprot.skip(ftype) @@ -27002,8 +27143,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1112 in self.part_vals: - oprot.writeString(iter1112) + for iter1147 in self.part_vals: + oprot.writeString(iter1147) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -27361,10 +27502,10 @@ def read(self, iprot): 
if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1116, _size1113) = iprot.readListBegin() - for _i1117 in xrange(_size1113): - _elem1118 = iprot.readString() - self.success.append(_elem1118) + (_etype1151, _size1148) = iprot.readListBegin() + for _i1152 in xrange(_size1148): + _elem1153 = iprot.readString() + self.success.append(_elem1153) iprot.readListEnd() else: iprot.skip(ftype) @@ -27387,8 +27528,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1119 in self.success: - oprot.writeString(iter1119) + for iter1154 in self.success: + oprot.writeString(iter1154) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27512,11 +27653,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1121, _vtype1122, _size1120 ) = iprot.readMapBegin() - for _i1124 in xrange(_size1120): - _key1125 = iprot.readString() - _val1126 = iprot.readString() - self.success[_key1125] = _val1126 + (_ktype1156, _vtype1157, _size1155 ) = iprot.readMapBegin() + for _i1159 in xrange(_size1155): + _key1160 = iprot.readString() + _val1161 = iprot.readString() + self.success[_key1160] = _val1161 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27539,9 +27680,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1127,viter1128 in self.success.items(): - oprot.writeString(kiter1127) - oprot.writeString(viter1128) + for kiter1162,viter1163 in self.success.items(): + oprot.writeString(kiter1162) + oprot.writeString(viter1163) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27617,11 +27758,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1130, _vtype1131, _size1129 ) = iprot.readMapBegin() - for _i1133 in xrange(_size1129): - 
_key1134 = iprot.readString() - _val1135 = iprot.readString() - self.part_vals[_key1134] = _val1135 + (_ktype1165, _vtype1166, _size1164 ) = iprot.readMapBegin() + for _i1168 in xrange(_size1164): + _key1169 = iprot.readString() + _val1170 = iprot.readString() + self.part_vals[_key1169] = _val1170 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27651,9 +27792,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1136,viter1137 in self.part_vals.items(): - oprot.writeString(kiter1136) - oprot.writeString(viter1137) + for kiter1171,viter1172 in self.part_vals.items(): + oprot.writeString(kiter1171) + oprot.writeString(viter1172) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -27867,11 +28008,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1139, _vtype1140, _size1138 ) = iprot.readMapBegin() - for _i1142 in xrange(_size1138): - _key1143 = iprot.readString() - _val1144 = iprot.readString() - self.part_vals[_key1143] = _val1144 + (_ktype1174, _vtype1175, _size1173 ) = iprot.readMapBegin() + for _i1177 in xrange(_size1173): + _key1178 = iprot.readString() + _val1179 = iprot.readString() + self.part_vals[_key1178] = _val1179 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27901,9 +28042,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1145,viter1146 in self.part_vals.items(): - oprot.writeString(kiter1145) - oprot.writeString(viter1146) + for kiter1180,viter1181 in self.part_vals.items(): + oprot.writeString(kiter1180) + oprot.writeString(viter1181) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -28958,11 +29099,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - 
(_etype1150, _size1147) = iprot.readListBegin() - for _i1151 in xrange(_size1147): - _elem1152 = Index() - _elem1152.read(iprot) - self.success.append(_elem1152) + (_etype1185, _size1182) = iprot.readListBegin() + for _i1186 in xrange(_size1182): + _elem1187 = Index() + _elem1187.read(iprot) + self.success.append(_elem1187) iprot.readListEnd() else: iprot.skip(ftype) @@ -28991,8 +29132,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1153 in self.success: - iter1153.write(oprot) + for iter1188 in self.success: + iter1188.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29147,10 +29288,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1157, _size1154) = iprot.readListBegin() - for _i1158 in xrange(_size1154): - _elem1159 = iprot.readString() - self.success.append(_elem1159) + (_etype1192, _size1189) = iprot.readListBegin() + for _i1193 in xrange(_size1189): + _elem1194 = iprot.readString() + self.success.append(_elem1194) iprot.readListEnd() else: iprot.skip(ftype) @@ -29173,8 +29314,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1160 in self.success: - oprot.writeString(iter1160) + for iter1195 in self.success: + oprot.writeString(iter1195) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -32358,10 +32499,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1164, _size1161) = iprot.readListBegin() - for _i1165 in xrange(_size1161): - _elem1166 = iprot.readString() - self.success.append(_elem1166) + (_etype1199, _size1196) = iprot.readListBegin() + for _i1200 in xrange(_size1196): + _elem1201 = iprot.readString() + self.success.append(_elem1201) iprot.readListEnd() else: iprot.skip(ftype) 
@@ -32384,8 +32525,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1167 in self.success: - oprot.writeString(iter1167) + for iter1202 in self.success: + oprot.writeString(iter1202) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33073,10 +33214,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1171, _size1168) = iprot.readListBegin() - for _i1172 in xrange(_size1168): - _elem1173 = iprot.readString() - self.success.append(_elem1173) + (_etype1206, _size1203) = iprot.readListBegin() + for _i1207 in xrange(_size1203): + _elem1208 = iprot.readString() + self.success.append(_elem1208) iprot.readListEnd() else: iprot.skip(ftype) @@ -33099,8 +33240,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1174 in self.success: - oprot.writeString(iter1174) + for iter1209 in self.success: + oprot.writeString(iter1209) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33614,11 +33755,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1178, _size1175) = iprot.readListBegin() - for _i1179 in xrange(_size1175): - _elem1180 = Role() - _elem1180.read(iprot) - self.success.append(_elem1180) + (_etype1213, _size1210) = iprot.readListBegin() + for _i1214 in xrange(_size1210): + _elem1215 = Role() + _elem1215.read(iprot) + self.success.append(_elem1215) iprot.readListEnd() else: iprot.skip(ftype) @@ -33641,8 +33782,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1181 in self.success: - iter1181.write(oprot) + for iter1216 in self.success: + iter1216.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() 
if self.o1 is not None: @@ -34151,10 +34292,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1185, _size1182) = iprot.readListBegin() - for _i1186 in xrange(_size1182): - _elem1187 = iprot.readString() - self.group_names.append(_elem1187) + (_etype1220, _size1217) = iprot.readListBegin() + for _i1221 in xrange(_size1217): + _elem1222 = iprot.readString() + self.group_names.append(_elem1222) iprot.readListEnd() else: iprot.skip(ftype) @@ -34179,8 +34320,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1188 in self.group_names: - oprot.writeString(iter1188) + for iter1223 in self.group_names: + oprot.writeString(iter1223) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34407,11 +34548,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1192, _size1189) = iprot.readListBegin() - for _i1193 in xrange(_size1189): - _elem1194 = HiveObjectPrivilege() - _elem1194.read(iprot) - self.success.append(_elem1194) + (_etype1227, _size1224) = iprot.readListBegin() + for _i1228 in xrange(_size1224): + _elem1229 = HiveObjectPrivilege() + _elem1229.read(iprot) + self.success.append(_elem1229) iprot.readListEnd() else: iprot.skip(ftype) @@ -34434,8 +34575,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1195 in self.success: - iter1195.write(oprot) + for iter1230 in self.success: + iter1230.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34933,10 +35074,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1199, _size1196) = iprot.readListBegin() - for _i1200 in xrange(_size1196): - _elem1201 = iprot.readString() - 
self.group_names.append(_elem1201) + (_etype1234, _size1231) = iprot.readListBegin() + for _i1235 in xrange(_size1231): + _elem1236 = iprot.readString() + self.group_names.append(_elem1236) iprot.readListEnd() else: iprot.skip(ftype) @@ -34957,8 +35098,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1202 in self.group_names: - oprot.writeString(iter1202) + for iter1237 in self.group_names: + oprot.writeString(iter1237) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35013,10 +35154,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1206, _size1203) = iprot.readListBegin() - for _i1207 in xrange(_size1203): - _elem1208 = iprot.readString() - self.success.append(_elem1208) + (_etype1241, _size1238) = iprot.readListBegin() + for _i1242 in xrange(_size1238): + _elem1243 = iprot.readString() + self.success.append(_elem1243) iprot.readListEnd() else: iprot.skip(ftype) @@ -35039,8 +35180,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1209 in self.success: - oprot.writeString(iter1209) + for iter1244 in self.success: + oprot.writeString(iter1244) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35972,10 +36113,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1213, _size1210) = iprot.readListBegin() - for _i1214 in xrange(_size1210): - _elem1215 = iprot.readString() - self.success.append(_elem1215) + (_etype1248, _size1245) = iprot.readListBegin() + for _i1249 in xrange(_size1245): + _elem1250 = iprot.readString() + self.success.append(_elem1250) iprot.readListEnd() else: iprot.skip(ftype) @@ -35992,8 +36133,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1216 in self.success: - oprot.writeString(iter1216) + for iter1251 in self.success: + oprot.writeString(iter1251) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36520,10 +36661,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1220, _size1217) = iprot.readListBegin() - for _i1221 in xrange(_size1217): - _elem1222 = iprot.readString() - self.success.append(_elem1222) + (_etype1255, _size1252) = iprot.readListBegin() + for _i1256 in xrange(_size1252): + _elem1257 = iprot.readString() + self.success.append(_elem1257) iprot.readListEnd() else: iprot.skip(ftype) @@ -36540,8 +36681,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1223 in self.success: - oprot.writeString(iter1223) + for iter1258 in self.success: + oprot.writeString(iter1258) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -37330,6 +37471,338 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_open_write_ids_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (GetOpenWriteIdsRequest, GetOpenWriteIdsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetOpenWriteIdsRequest() + 
self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_open_write_ids_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_open_write_ids_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetOpenWriteIdsResponse, GetOpenWriteIdsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if 
fid == 0: + if ftype == TType.STRUCT: + self.success = GetOpenWriteIdsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_open_write_ids_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class allocate_table_write_id_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AllocateTableWriteIdRequest, AllocateTableWriteIdRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + 
self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AllocateTableWriteIdRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('allocate_table_write_id_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class allocate_table_write_id_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (AllocateTableWriteIdResponse, AllocateTableWriteIdResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, 
TxnAbortedException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AllocateTableWriteIdResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('allocate_table_write_id_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is 
not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class lock_args: """ Attributes: diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 5598859..482b1d4 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9967,6 +9967,568 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class GetOpenWriteIdsRequest: + """ + Attributes: + - tableNames + - validTxnStr + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'tableNames', (TType.STRING,None), None, ), # 1 + (2, TType.STRING, 'validTxnStr', None, None, ), # 2 + ) + + def __init__(self, tableNames=None, validTxnStr=None,): + self.tableNames = tableNames + self.validTxnStr = validTxnStr + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 
1: + if ftype == TType.LIST: + self.tableNames = [] + (_etype493, _size490) = iprot.readListBegin() + for _i494 in xrange(_size490): + _elem495 = iprot.readString() + self.tableNames.append(_elem495) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validTxnStr = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetOpenWriteIdsRequest') + if self.tableNames is not None: + oprot.writeFieldBegin('tableNames', TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.tableNames)) + for iter496 in self.tableNames: + oprot.writeString(iter496) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.validTxnStr is not None: + oprot.writeFieldBegin('validTxnStr', TType.STRING, 2) + oprot.writeString(self.validTxnStr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableNames is None: + raise TProtocol.TProtocolException(message='Required field tableNames is unset!') + if self.validTxnStr is None: + raise TProtocol.TProtocolException(message='Required field validTxnStr is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.tableNames) + value = (value * 31) ^ hash(self.validTxnStr) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class OpenWriteIds: + """ + Attributes: + - tableName + 
- writeIdHighWaterMark + - openWriteIds + - minWriteId + - abortedBits + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'tableName', None, None, ), # 1 + (2, TType.I64, 'writeIdHighWaterMark', None, None, ), # 2 + (3, TType.LIST, 'openWriteIds', (TType.I64,None), None, ), # 3 + (4, TType.I64, 'minWriteId', None, None, ), # 4 + (5, TType.STRING, 'abortedBits', None, None, ), # 5 + ) + + def __init__(self, tableName=None, writeIdHighWaterMark=None, openWriteIds=None, minWriteId=None, abortedBits=None,): + self.tableName = tableName + self.writeIdHighWaterMark = writeIdHighWaterMark + self.openWriteIds = openWriteIds + self.minWriteId = minWriteId + self.abortedBits = abortedBits + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeIdHighWaterMark = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.openWriteIds = [] + (_etype500, _size497) = iprot.readListBegin() + for _i501 in xrange(_size497): + _elem502 = iprot.readI64() + self.openWriteIds.append(_elem502) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.minWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.abortedBits = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated 
and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('OpenWriteIds') + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 1) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + if self.writeIdHighWaterMark is not None: + oprot.writeFieldBegin('writeIdHighWaterMark', TType.I64, 2) + oprot.writeI64(self.writeIdHighWaterMark) + oprot.writeFieldEnd() + if self.openWriteIds is not None: + oprot.writeFieldBegin('openWriteIds', TType.LIST, 3) + oprot.writeListBegin(TType.I64, len(self.openWriteIds)) + for iter503 in self.openWriteIds: + oprot.writeI64(iter503) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.minWriteId is not None: + oprot.writeFieldBegin('minWriteId', TType.I64, 4) + oprot.writeI64(self.minWriteId) + oprot.writeFieldEnd() + if self.abortedBits is not None: + oprot.writeFieldBegin('abortedBits', TType.STRING, 5) + oprot.writeString(self.abortedBits) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + if self.writeIdHighWaterMark is None: + raise TProtocol.TProtocolException(message='Required field writeIdHighWaterMark is unset!') + if self.openWriteIds is None: + raise TProtocol.TProtocolException(message='Required field openWriteIds is unset!') + if self.abortedBits is None: + raise TProtocol.TProtocolException(message='Required field abortedBits is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.tableName) + value = (value * 31) ^ hash(self.writeIdHighWaterMark) + value = (value * 31) ^ hash(self.openWriteIds) + value = (value * 31) ^ hash(self.minWriteId) + value = (value * 31) ^ hash(self.abortedBits) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for 
key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetOpenWriteIdsResponse: + """ + Attributes: + - openWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'openWriteIds', (TType.STRUCT,(OpenWriteIds, OpenWriteIds.thrift_spec)), None, ), # 1 + ) + + def __init__(self, openWriteIds=None,): + self.openWriteIds = openWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.openWriteIds = [] + (_etype507, _size504) = iprot.readListBegin() + for _i508 in xrange(_size504): + _elem509 = OpenWriteIds() + _elem509.read(iprot) + self.openWriteIds.append(_elem509) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetOpenWriteIdsResponse') + if self.openWriteIds is not None: + oprot.writeFieldBegin('openWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.openWriteIds)) + for iter510 in self.openWriteIds: + iter510.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if 
self.openWriteIds is None: + raise TProtocol.TProtocolException(message='Required field openWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.openWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AllocateTableWriteIdRequest: + """ + Attributes: + - txnIds + - dbName + - tableName + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'txnIds', (TType.I64,None), None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'tableName', None, None, ), # 3 + ) + + def __init__(self, txnIds=None, dbName=None, tableName=None,): + self.txnIds = txnIds + self.dbName = dbName + self.tableName = tableName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txnIds = [] + (_etype514, _size511) = iprot.readListBegin() + for _i515 in xrange(_size511): + _elem516 = iprot.readI64() + self.txnIds.append(_elem516) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AllocateTableWriteIdRequest') + if self.txnIds is not None: + oprot.writeFieldBegin('txnIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.txnIds)) + for iter517 in self.txnIds: + oprot.writeI64(iter517) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 2) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 3) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnIds is None: + raise TProtocol.TProtocolException(message='Required field txnIds is unset!') + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.txnIds) + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TxnToWriteId: + """ + Attributes: + - txnId + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'txnId', None, None, ), # 1 + (2, TType.I64, 'writeId', None, None, ), # 2 + ) + + def __init__(self, txnId=None, writeId=None,): + self.txnId = txnId + 
self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TxnToWriteId') + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnId is None: + raise TProtocol.TProtocolException(message='Required field txnId is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + +class AllocateTableWriteIdResponse: + """ + Attributes: + - txnToWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'txnToWriteIds', (TType.STRUCT,(TxnToWriteId, TxnToWriteId.thrift_spec)), None, ), # 1 + ) + + def __init__(self, txnToWriteIds=None,): + self.txnToWriteIds = txnToWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txnToWriteIds = [] + (_etype521, _size518) = iprot.readListBegin() + for _i522 in xrange(_size518): + _elem523 = TxnToWriteId() + _elem523.read(iprot) + self.txnToWriteIds.append(_elem523) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AllocateTableWriteIdResponse') + if self.txnToWriteIds is not None: + oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) + for iter524 in self.txnToWriteIds: + iter524.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnToWriteIds is None: + raise TProtocol.TProtocolException(message='Required field txnToWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ 
hash(self.txnToWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class LockComponent: """ Attributes: @@ -10167,11 +10729,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype493, _size490) = iprot.readListBegin() - for _i494 in xrange(_size490): - _elem495 = LockComponent() - _elem495.read(iprot) - self.component.append(_elem495) + (_etype528, _size525) = iprot.readListBegin() + for _i529 in xrange(_size525): + _elem530 = LockComponent() + _elem530.read(iprot) + self.component.append(_elem530) iprot.readListEnd() else: iprot.skip(ftype) @@ -10208,8 +10770,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter496 in self.component: - iter496.write(oprot) + for iter531 in self.component: + iter531.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -10907,11 +11469,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype500, _size497) = iprot.readListBegin() - for _i501 in xrange(_size497): - _elem502 = ShowLocksResponseElement() - _elem502.read(iprot) - self.locks.append(_elem502) + (_etype535, _size532) = iprot.readListBegin() + for _i536 in xrange(_size532): + _elem537 = ShowLocksResponseElement() + _elem537.read(iprot) + self.locks.append(_elem537) iprot.readListEnd() else: iprot.skip(ftype) @@ -10928,8 +11490,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter503 in self.locks: - iter503.write(oprot) + for iter538 in 
self.locks: + iter538.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11144,20 +11706,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype507, _size504) = iprot.readSetBegin() - for _i508 in xrange(_size504): - _elem509 = iprot.readI64() - self.aborted.add(_elem509) + (_etype542, _size539) = iprot.readSetBegin() + for _i543 in xrange(_size539): + _elem544 = iprot.readI64() + self.aborted.add(_elem544) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype513, _size510) = iprot.readSetBegin() - for _i514 in xrange(_size510): - _elem515 = iprot.readI64() - self.nosuch.add(_elem515) + (_etype548, _size545) = iprot.readSetBegin() + for _i549 in xrange(_size545): + _elem550 = iprot.readI64() + self.nosuch.add(_elem550) iprot.readSetEnd() else: iprot.skip(ftype) @@ -11174,15 +11736,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter516 in self.aborted: - oprot.writeI64(iter516) + for iter551 in self.aborted: + oprot.writeI64(iter551) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter517 in self.nosuch: - oprot.writeI64(iter517) + for iter552 in self.nosuch: + oprot.writeI64(iter552) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11279,11 +11841,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype519, _vtype520, _size518 ) = iprot.readMapBegin() - for _i522 in xrange(_size518): - _key523 = iprot.readString() - _val524 = iprot.readString() - self.properties[_key523] = _val524 + (_ktype554, _vtype555, _size553 ) = iprot.readMapBegin() + for _i557 in xrange(_size553): + _key558 = iprot.readString() + _val559 = iprot.readString() + 
self.properties[_key558] = _val559 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11320,9 +11882,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter525,viter526 in self.properties.items(): - oprot.writeString(kiter525) - oprot.writeString(viter526) + for kiter560,viter561 in self.properties.items(): + oprot.writeString(kiter560) + oprot.writeString(viter561) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11757,11 +12319,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype530, _size527) = iprot.readListBegin() - for _i531 in xrange(_size527): - _elem532 = ShowCompactResponseElement() - _elem532.read(iprot) - self.compacts.append(_elem532) + (_etype565, _size562) = iprot.readListBegin() + for _i566 in xrange(_size562): + _elem567 = ShowCompactResponseElement() + _elem567.read(iprot) + self.compacts.append(_elem567) iprot.readListEnd() else: iprot.skip(ftype) @@ -11778,8 +12340,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter533 in self.compacts: - iter533.write(oprot) + for iter568 in self.compacts: + iter568.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11860,10 +12422,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitionnames = [] - (_etype537, _size534) = iprot.readListBegin() - for _i538 in xrange(_size534): - _elem539 = iprot.readString() - self.partitionnames.append(_elem539) + (_etype572, _size569) = iprot.readListBegin() + for _i573 in xrange(_size569): + _elem574 = iprot.readString() + self.partitionnames.append(_elem574) iprot.readListEnd() else: iprot.skip(ftype) @@ -11897,8 +12459,8 @@ def write(self, oprot): if self.partitionnames is not None: 
oprot.writeFieldBegin('partitionnames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter540 in self.partitionnames: - oprot.writeString(iter540) + for iter575 in self.partitionnames: + oprot.writeString(iter575) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -12117,10 +12679,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.SET: self.tablesUsed = set() - (_etype544, _size541) = iprot.readSetBegin() - for _i545 in xrange(_size541): - _elem546 = iprot.readString() - self.tablesUsed.add(_elem546) + (_etype579, _size576) = iprot.readSetBegin() + for _i580 in xrange(_size576): + _elem581 = iprot.readString() + self.tablesUsed.add(_elem581) iprot.readSetEnd() else: iprot.skip(ftype) @@ -12150,8 +12712,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 3) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter547 in self.tablesUsed: - oprot.writeString(iter547) + for iter582 in self.tablesUsed: + oprot.writeString(iter582) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -12447,11 +13009,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype551, _size548) = iprot.readListBegin() - for _i552 in xrange(_size548): - _elem553 = NotificationEvent() - _elem553.read(iprot) - self.events.append(_elem553) + (_etype586, _size583) = iprot.readListBegin() + for _i587 in xrange(_size583): + _elem588 = NotificationEvent() + _elem588.read(iprot) + self.events.append(_elem588) iprot.readListEnd() else: iprot.skip(ftype) @@ -12468,8 +13030,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter554 in self.events: - iter554.write(oprot) + for iter589 in self.events: + iter589.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ 
-12750,20 +13312,20 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype558, _size555) = iprot.readListBegin() - for _i559 in xrange(_size555): - _elem560 = iprot.readString() - self.filesAdded.append(_elem560) + (_etype593, _size590) = iprot.readListBegin() + for _i594 in xrange(_size590): + _elem595 = iprot.readString() + self.filesAdded.append(_elem595) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype564, _size561) = iprot.readListBegin() - for _i565 in xrange(_size561): - _elem566 = iprot.readString() - self.filesAddedChecksum.append(_elem566) + (_etype599, _size596) = iprot.readListBegin() + for _i600 in xrange(_size596): + _elem601 = iprot.readString() + self.filesAddedChecksum.append(_elem601) iprot.readListEnd() else: iprot.skip(ftype) @@ -12784,15 +13346,15 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter567 in self.filesAdded: - oprot.writeString(iter567) + for iter602 in self.filesAdded: + oprot.writeString(iter602) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter568 in self.filesAddedChecksum: - oprot.writeString(iter568) + for iter603 in self.filesAddedChecksum: + oprot.writeString(iter603) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12947,10 +13509,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype572, _size569) = iprot.readListBegin() - for _i573 in xrange(_size569): - _elem574 = iprot.readString() - self.partitionVals.append(_elem574) + (_etype607, _size604) = iprot.readListBegin() + for _i608 in xrange(_size604): + _elem609 = iprot.readString() + 
self.partitionVals.append(_elem609) iprot.readListEnd() else: iprot.skip(ftype) @@ -12983,8 +13545,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter575 in self.partitionVals: - oprot.writeString(iter575) + for iter610 in self.partitionVals: + oprot.writeString(iter610) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13171,12 +13733,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype577, _vtype578, _size576 ) = iprot.readMapBegin() - for _i580 in xrange(_size576): - _key581 = iprot.readI64() - _val582 = MetadataPpdResult() - _val582.read(iprot) - self.metadata[_key581] = _val582 + (_ktype612, _vtype613, _size611 ) = iprot.readMapBegin() + for _i615 in xrange(_size611): + _key616 = iprot.readI64() + _val617 = MetadataPpdResult() + _val617.read(iprot) + self.metadata[_key616] = _val617 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13198,9 +13760,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter583,viter584 in self.metadata.items(): - oprot.writeI64(kiter583) - viter584.write(oprot) + for kiter618,viter619 in self.metadata.items(): + oprot.writeI64(kiter618) + viter619.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -13270,10 +13832,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype588, _size585) = iprot.readListBegin() - for _i589 in xrange(_size585): - _elem590 = iprot.readI64() - self.fileIds.append(_elem590) + (_etype623, _size620) = iprot.readListBegin() + for _i624 in xrange(_size620): + _elem625 = iprot.readI64() + self.fileIds.append(_elem625) iprot.readListEnd() else: iprot.skip(ftype) @@ -13305,8 +13867,8 @@ def write(self, oprot): if 
self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter591 in self.fileIds: - oprot.writeI64(iter591) + for iter626 in self.fileIds: + oprot.writeI64(iter626) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -13380,11 +13942,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype593, _vtype594, _size592 ) = iprot.readMapBegin() - for _i596 in xrange(_size592): - _key597 = iprot.readI64() - _val598 = iprot.readString() - self.metadata[_key597] = _val598 + (_ktype628, _vtype629, _size627 ) = iprot.readMapBegin() + for _i631 in xrange(_size627): + _key632 = iprot.readI64() + _val633 = iprot.readString() + self.metadata[_key632] = _val633 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13406,9 +13968,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter599,viter600 in self.metadata.items(): - oprot.writeI64(kiter599) - oprot.writeString(viter600) + for kiter634,viter635 in self.metadata.items(): + oprot.writeI64(kiter634) + oprot.writeString(viter635) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -13469,10 +14031,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype604, _size601) = iprot.readListBegin() - for _i605 in xrange(_size601): - _elem606 = iprot.readI64() - self.fileIds.append(_elem606) + (_etype639, _size636) = iprot.readListBegin() + for _i640 in xrange(_size636): + _elem641 = iprot.readI64() + self.fileIds.append(_elem641) iprot.readListEnd() else: iprot.skip(ftype) @@ -13489,8 +14051,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter607 in self.fileIds: - oprot.writeI64(iter607) + for iter642 in 
self.fileIds: + oprot.writeI64(iter642) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13596,20 +14158,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype611, _size608) = iprot.readListBegin() - for _i612 in xrange(_size608): - _elem613 = iprot.readI64() - self.fileIds.append(_elem613) + (_etype646, _size643) = iprot.readListBegin() + for _i647 in xrange(_size643): + _elem648 = iprot.readI64() + self.fileIds.append(_elem648) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype617, _size614) = iprot.readListBegin() - for _i618 in xrange(_size614): - _elem619 = iprot.readString() - self.metadata.append(_elem619) + (_etype652, _size649) = iprot.readListBegin() + for _i653 in xrange(_size649): + _elem654 = iprot.readString() + self.metadata.append(_elem654) iprot.readListEnd() else: iprot.skip(ftype) @@ -13631,15 +14193,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter620 in self.fileIds: - oprot.writeI64(iter620) + for iter655 in self.fileIds: + oprot.writeI64(iter655) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter621 in self.metadata: - oprot.writeString(iter621) + for iter656 in self.metadata: + oprot.writeString(iter656) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -13747,10 +14309,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype625, _size622) = iprot.readListBegin() - for _i626 in xrange(_size622): - _elem627 = iprot.readI64() - self.fileIds.append(_elem627) + (_etype660, _size657) = iprot.readListBegin() + for _i661 in xrange(_size657): + _elem662 = iprot.readI64() + self.fileIds.append(_elem662) iprot.readListEnd() 
else: iprot.skip(ftype) @@ -13767,8 +14329,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter628 in self.fileIds: - oprot.writeI64(iter628) + for iter663 in self.fileIds: + oprot.writeI64(iter663) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13997,11 +14559,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype632, _size629) = iprot.readListBegin() - for _i633 in xrange(_size629): - _elem634 = Function() - _elem634.read(iprot) - self.functions.append(_elem634) + (_etype667, _size664) = iprot.readListBegin() + for _i668 in xrange(_size664): + _elem669 = Function() + _elem669.read(iprot) + self.functions.append(_elem669) iprot.readListEnd() else: iprot.skip(ftype) @@ -14018,8 +14580,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter635 in self.functions: - iter635.write(oprot) + for iter670 in self.functions: + iter670.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14071,10 +14633,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype639, _size636) = iprot.readListBegin() - for _i640 in xrange(_size636): - _elem641 = iprot.readI32() - self.values.append(_elem641) + (_etype674, _size671) = iprot.readListBegin() + for _i675 in xrange(_size671): + _elem676 = iprot.readI32() + self.values.append(_elem676) iprot.readListEnd() else: iprot.skip(ftype) @@ -14091,8 +14653,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter642 in self.values: - oprot.writeI32(iter642) + for iter677 in self.values: + oprot.writeI32(iter677) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ 
-14321,10 +14883,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype646, _size643) = iprot.readListBegin() - for _i647 in xrange(_size643): - _elem648 = iprot.readString() - self.tblNames.append(_elem648) + (_etype681, _size678) = iprot.readListBegin() + for _i682 in xrange(_size678): + _elem683 = iprot.readString() + self.tblNames.append(_elem683) iprot.readListEnd() else: iprot.skip(ftype) @@ -14351,8 +14913,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter649 in self.tblNames: - oprot.writeString(iter649) + for iter684 in self.tblNames: + oprot.writeString(iter684) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -14412,11 +14974,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype653, _size650) = iprot.readListBegin() - for _i654 in xrange(_size650): - _elem655 = Table() - _elem655.read(iprot) - self.tables.append(_elem655) + (_etype688, _size685) = iprot.readListBegin() + for _i689 in xrange(_size685): + _elem690 = Table() + _elem690.read(iprot) + self.tables.append(_elem690) iprot.readListEnd() else: iprot.skip(ftype) @@ -14433,8 +14995,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter656 in self.tables: - iter656.write(oprot) + for iter691 in self.tables: + iter691.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14738,10 +15300,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.SET: self.tablesUsed = set() - (_etype660, _size657) = iprot.readSetBegin() - for _i661 in xrange(_size657): - _elem662 = iprot.readString() - self.tablesUsed.add(_elem662) + (_etype695, _size692) = iprot.readSetBegin() + for _i696 in xrange(_size692): + _elem697 = iprot.readString() + 
self.tablesUsed.add(_elem697) iprot.readSetEnd() else: iprot.skip(ftype) @@ -14767,8 +15329,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 2) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter663 in self.tablesUsed: - oprot.writeString(iter663) + for iter698 in self.tablesUsed: + oprot.writeString(iter698) oprot.writeSetEnd() oprot.writeFieldEnd() if self.invalidationTime is not None: @@ -15670,44 +16232,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype667, _size664) = iprot.readListBegin() - for _i668 in xrange(_size664): - _elem669 = WMPool() - _elem669.read(iprot) - self.pools.append(_elem669) + (_etype702, _size699) = iprot.readListBegin() + for _i703 in xrange(_size699): + _elem704 = WMPool() + _elem704.read(iprot) + self.pools.append(_elem704) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype673, _size670) = iprot.readListBegin() - for _i674 in xrange(_size670): - _elem675 = WMMapping() - _elem675.read(iprot) - self.mappings.append(_elem675) + (_etype708, _size705) = iprot.readListBegin() + for _i709 in xrange(_size705): + _elem710 = WMMapping() + _elem710.read(iprot) + self.mappings.append(_elem710) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype679, _size676) = iprot.readListBegin() - for _i680 in xrange(_size676): - _elem681 = WMTrigger() - _elem681.read(iprot) - self.triggers.append(_elem681) + (_etype714, _size711) = iprot.readListBegin() + for _i715 in xrange(_size711): + _elem716 = WMTrigger() + _elem716.read(iprot) + self.triggers.append(_elem716) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype685, _size682) = iprot.readListBegin() - for _i686 in xrange(_size682): - _elem687 = WMPoolTrigger() - _elem687.read(iprot) - 
self.poolTriggers.append(_elem687) + (_etype720, _size717) = iprot.readListBegin() + for _i721 in xrange(_size717): + _elem722 = WMPoolTrigger() + _elem722.read(iprot) + self.poolTriggers.append(_elem722) iprot.readListEnd() else: iprot.skip(ftype) @@ -15728,29 +16290,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter688 in self.pools: - iter688.write(oprot) + for iter723 in self.pools: + iter723.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter689 in self.mappings: - iter689.write(oprot) + for iter724 in self.mappings: + iter724.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter690 in self.triggers: - iter690.write(oprot) + for iter725 in self.triggers: + iter725.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter691 in self.poolTriggers: - iter691.write(oprot) + for iter726 in self.poolTriggers: + iter726.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16224,11 +16786,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype695, _size692) = iprot.readListBegin() - for _i696 in xrange(_size692): - _elem697 = WMResourcePlan() - _elem697.read(iprot) - self.resourcePlans.append(_elem697) + (_etype730, _size727) = iprot.readListBegin() + for _i731 in xrange(_size727): + _elem732 = WMResourcePlan() + _elem732.read(iprot) + self.resourcePlans.append(_elem732) iprot.readListEnd() else: iprot.skip(ftype) @@ -16245,8 +16807,8 @@ 
def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter698 in self.resourcePlans: - iter698.write(oprot) + for iter733 in self.resourcePlans: + iter733.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16550,20 +17112,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype702, _size699) = iprot.readListBegin() - for _i703 in xrange(_size699): - _elem704 = iprot.readString() - self.errors.append(_elem704) + (_etype737, _size734) = iprot.readListBegin() + for _i738 in xrange(_size734): + _elem739 = iprot.readString() + self.errors.append(_elem739) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype708, _size705) = iprot.readListBegin() - for _i709 in xrange(_size705): - _elem710 = iprot.readString() - self.warnings.append(_elem710) + (_etype743, _size740) = iprot.readListBegin() + for _i744 in xrange(_size740): + _elem745 = iprot.readString() + self.warnings.append(_elem745) iprot.readListEnd() else: iprot.skip(ftype) @@ -16580,15 +17142,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter711 in self.errors: - oprot.writeString(iter711) + for iter746 in self.errors: + oprot.writeString(iter746) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter712 in self.warnings: - oprot.writeString(iter712) + for iter747 in self.warnings: + oprot.writeString(iter747) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17165,11 +17727,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype716, _size713) = 
iprot.readListBegin() - for _i717 in xrange(_size713): - _elem718 = WMTrigger() - _elem718.read(iprot) - self.triggers.append(_elem718) + (_etype751, _size748) = iprot.readListBegin() + for _i752 in xrange(_size748): + _elem753 = WMTrigger() + _elem753.read(iprot) + self.triggers.append(_elem753) iprot.readListEnd() else: iprot.skip(ftype) @@ -17186,8 +17748,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter719 in self.triggers: - iter719.write(oprot) + for iter754 in self.triggers: + iter754.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index bc58cfe..bf19644 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2220,6 +2220,131 @@ class CommitTxnRequest ::Thrift::Struct.generate_accessors self end +class GetOpenWriteIdsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLENAMES = 1 + VALIDTXNSTR = 2 + + FIELDS = { + TABLENAMES => {:type => ::Thrift::Types::LIST, :name => 'tableNames', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNSTR => {:type => ::Thrift::Types::STRING, :name => 'validTxnStr'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableNames is unset!') unless @tableNames + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field validTxnStr is unset!') unless @validTxnStr + end + + ::Thrift::Struct.generate_accessors self +end + +class OpenWriteIds + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLENAME = 1 + WRITEIDHIGHWATERMARK = 2 + OPENWRITEIDS = 3 + MINWRITEID = 4 + ABORTEDBITS = 
5 + + FIELDS = { + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, + WRITEIDHIGHWATERMARK => {:type => ::Thrift::Types::I64, :name => 'writeIdHighWaterMark'}, + OPENWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'openWriteIds', :element => {:type => ::Thrift::Types::I64}}, + MINWRITEID => {:type => ::Thrift::Types::I64, :name => 'minWriteId', :optional => true}, + ABORTEDBITS => {:type => ::Thrift::Types::STRING, :name => 'abortedBits', :binary => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeIdHighWaterMark is unset!') unless @writeIdHighWaterMark + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field openWriteIds is unset!') unless @openWriteIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field abortedBits is unset!') unless @abortedBits + end + + ::Thrift::Struct.generate_accessors self +end + +class GetOpenWriteIdsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + OPENWRITEIDS = 1 + + FIELDS = { + OPENWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'openWriteIds', :element => {:type => ::Thrift::Types::STRUCT, :class => ::OpenWriteIds}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field openWriteIds is unset!') unless @openWriteIds + end + + ::Thrift::Struct.generate_accessors self +end + +class AllocateTableWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNIDS = 1 + DBNAME = 2 + TABLENAME = 3 + + FIELDS = { + TXNIDS => {:type => ::Thrift::Types::LIST, :name => 'txnIds', :element => {:type => ::Thrift::Types::I64}}, + DBNAME => {:type => ::Thrift::Types::STRING, :name => 
'dbName'}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnIds is unset!') unless @txnIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + end + + ::Thrift::Struct.generate_accessors self +end + +class TxnToWriteId + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNID = 1 + WRITEID = 2 + + FIELDS = { + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnId is unset!') unless @txnId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class AllocateTableWriteIdResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNTOWRITEIDS = 1 + + FIELDS = { + TXNTOWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'txnToWriteIds', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnToWriteId}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnToWriteIds is unset!') unless @txnToWriteIds + end + + ::Thrift::Struct.generate_accessors self +end + class LockComponent include ::Thrift::Struct, ::Thrift::Struct_Union TYPE = 1 diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index ec88131..f7e6f49 100644 --- 
a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2388,6 +2388,41 @@ module ThriftHiveMetastore return end + def get_open_write_ids(rqst) + send_get_open_write_ids(rqst) + return recv_get_open_write_ids() + end + + def send_get_open_write_ids(rqst) + send_message('get_open_write_ids', Get_open_write_ids_args, :rqst => rqst) + end + + def recv_get_open_write_ids() + result = receive_message(Get_open_write_ids_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_open_write_ids failed: unknown result') + end + + def allocate_table_write_id(rqst) + send_allocate_table_write_id(rqst) + return recv_allocate_table_write_id() + end + + def send_allocate_table_write_id(rqst) + send_message('allocate_table_write_id', Allocate_table_write_id_args, :rqst => rqst) + end + + def recv_allocate_table_write_id() + result = receive_message(Allocate_table_write_id_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'allocate_table_write_id failed: unknown result') + end + def lock(rqst) send_lock(rqst) return recv_lock() @@ -4852,6 +4887,34 @@ module ThriftHiveMetastore write_result(result, oprot, 'commit_txn', seqid) end + def process_get_open_write_ids(seqid, iprot, oprot) + args = read_args(iprot, Get_open_write_ids_args) + result = Get_open_write_ids_result.new() + begin + result.success = @handler.get_open_write_ids(args.rqst) + rescue ::NoSuchTxnException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_open_write_ids', seqid) + end + + def process_allocate_table_write_id(seqid, iprot, oprot) + args = read_args(iprot, Allocate_table_write_id_args) + result = Allocate_table_write_id_result.new() + begin + result.success = @handler.allocate_table_write_id(args.rqst) + rescue ::NoSuchTxnException => o1 + result.o1 = o1 + rescue ::TxnAbortedException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'allocate_table_write_id', seqid) + end + def process_lock(seqid, iprot, oprot) args = read_args(iprot, Lock_args) result = Lock_result.new() @@ -10707,6 +10770,80 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_open_write_ids_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::GetOpenWriteIdsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_open_write_ids_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetOpenWriteIdsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => 
::NoSuchTxnException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Allocate_table_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::AllocateTableWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Allocate_table_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AllocateTableWriteIdResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnAbortedException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Lock_args include ::Thrift::Struct, ::Thrift::Struct_Union RQST = 1 diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 8dc9b6a..f8adaeb 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -6681,6 +6681,17 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { } @Override + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws TException { + return getTxnHandler().getOpenWriteIds(rqst); + } + + @Override + public AllocateTableWriteIdResponse 
allocate_table_write_id( + AllocateTableWriteIdRequest rqst) throws TException { + return getTxnHandler().allocateTableWriteId(rqst); + } + + @Override public LockResponse lock(LockRequest rqst) throws TException { return getTxnHandler().lock(rqst); } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 2e76e17..b50ddb1 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -54,6 +54,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -2180,6 +2182,19 @@ public ValidTxnList getValidTxns(long currentTxn) throws TException { } @Override + public ValidWriteIdList getValidWriteIds(String tableName) throws TException { + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(Collections.singletonList(tableName), null); + GetOpenWriteIdsResponse openWriteIds = client.get_open_write_ids(rqst); + return TxnUtils.createValidReaderWriteIdList(openWriteIds.getOpenWriteIds().get(0)); + } + + @Override + public ValidTxnWriteIdList getValidWriteIds(List tablesList, String validTxnStr) throws TException { + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(tablesList, validTxnStr); + return TxnUtils.createValidTxnWriteIdList(client.get_open_write_ids(rqst)); + } + + @Override public long openTxn(String user) throws TException { OpenTxnsResponse txns = openTxns(user, 1); return 
txns.getTxn_ids().get(0); @@ -2219,6 +2234,20 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { } @Override + public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { + return allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName).get(0).getWriteId(); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) + throws TException { + AllocateTableWriteIdRequest rqst + = new AllocateTableWriteIdRequest(txnIds, dbName, tableName); + AllocateTableWriteIdResponse writeId = client.allocate_table_write_id(rqst); + return writeId.getTxnToWriteIds(); + } + + @Override public LockResponse lock(LockRequest request) throws NoSuchTxnException, TxnAbortedException, TException { return client.lock(request); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 96d4590..215399f 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -99,6 +101,7 @@ import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; 
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; @@ -1346,14 +1349,32 @@ GetAllFunctionsResponse getAllFunctions() /** * Get a structure that details valid transactions. - * @param currentTxn The current transaction of the caller. This will be removed from the + * @param currentTxn The current transaction of the caller. This will be removed from the * exceptions list so that the caller sees records from his own transaction. - * @return list of valid transactions + * @return list of valid transactions and also valid write IDs for each input table. * @throws TException */ ValidTxnList getValidTxns(long currentTxn) throws TException; /** + * Get a structure that details valid transactions. + * @param tableName full table name of format . + * @return list of valid write ids for the given table + * @throws TException + */ + ValidWriteIdList getValidWriteIds(String tableName) throws TException; + + /** + * Get a structure that details valid transactions. + * @param tablesList list of tables read from the current transaction for which needs to populate + * the valid write ids + * @param validTxnStr snapshot of valid txns for the current txn + * @return list of valid write ids for the given list of tables. + * @throws TException + */ + ValidTxnWriteIdList getValidWriteIds(List tablesList, String validTxnStr) throws TException; + + /** * Initiate a transaction. * @param user User who is opening this transaction. This is the Hive user, * not necessarily the OS user. It is assumed that this user has already been @@ -1422,6 +1443,24 @@ void commitTxn(long txnid) void abortTxns(List txnids) throws TException; /** + * Allocate a per table write ID and associate it with the given transaction + * @param txnId id of transaction to which the allocated write ID to be associated. 
+ * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @throws TException + */ + long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException; + + /** + * Allocate a per table write ID and associate it with the given transaction + * @param txnIds ids of transaction batchto which the allocated write ID to be associated. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @throws TException + */ + List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException; + + /** * Show the list of currently open transactions. This is for use by "show transactions" in the * grammar, not for applications that want to find a list of current transactions to work with. * Those wishing the latter should call {@link #getValidTxns()}. diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index 41e428b..1834e20 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hive.metastore.txn; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.OpenWriteIds; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -42,11 +42,11 @@ public boolean tooManyAborts = false; /** * {@code 0} means it wasn't set (e.g. 
in case of upgrades, since ResultSet.getLong() will return 0 if field is NULL) - * See {@link TxnStore#setCompactionHighestTxnId(CompactionInfo, long)} for precise definition. - * See also {@link TxnUtils#createValidCompactTxnList(GetOpenTxnsInfoResponse)} and - * {@link ValidCompactorTxnList#highWatermark} + * See {@link TxnStore#setCompactionHighestWriteId(CompactionInfo, long)} for precise definition. + * See also {@link TxnUtils#createValidCompactWriteIdList(OpenWriteIds)} and + * {@link ValidCompactorWriteIdList#highWatermark} */ - public long highestTxnId; + public long highestWriteId; byte[] metaInfo; String hadoopJobId; @@ -107,7 +107,7 @@ public String toString() { "properties:" + properties + "," + "runAs:" + runAs + "," + "tooManyAborts:" + tooManyAborts + "," + - "highestTxnId:" + highestTxnId; + "highestWriteId:" + highestWriteId; } /** @@ -127,7 +127,7 @@ static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLExcept fullCi.workerId = rs.getString(8); fullCi.start = rs.getLong(9); fullCi.runAs = rs.getString(10); - fullCi.highestTxnId = rs.getLong(11); + fullCi.highestWriteId = rs.getLong(11); fullCi.metaInfo = rs.getBytes(12); fullCi.hadoopJobId = rs.getString(13); return fullCi; @@ -144,7 +144,7 @@ static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionIn pStmt.setLong(9, ci.start); pStmt.setLong(10, endTime); pStmt.setString(11, ci.runAs); - pStmt.setLong(12, ci.highestTxnId); + pStmt.setLong(12, ci.highestWriteId); pStmt.setBytes(13, ci.metaInfo); pStmt.setString(14, ci.hadoopJobId); } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index a90b7d4..ab4f628 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -287,7 +287,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); String s = "select cq_id, cq_database, cq_table, cq_partition, " + - "cq_type, cq_run_as, cq_highest_txn_id from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'"; + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); while (rs.next()) { @@ -302,7 +302,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { default: throw new MetaException("Unexpected compaction type " + rs.getString(5)); } info.runAs = rs.getString(6); - info.highestTxnId = rs.getLong(7); + info.highestWriteId = rs.getLong(7); rc.add(info); } LOG.debug("Going to rollback"); @@ -338,7 +338,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { ResultSet rs = null; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, info.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -358,20 +358,20 @@ public void markCleaned(CompactionInfo info) throws MetaException { LOG.debug("Going to rollback"); dbConn.rollback(); } - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, 
CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); info.state = SUCCEEDED_STATE; CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn)); updCount = pStmt.executeUpdate(); // Remove entries from completed_txn_components as well, so we don't start looking there - // again but only up to the highest txn ID include in this compaction job. - //highestTxnId will be NULL in upgrade scenarios + // again but only up to the highest write ID included in this compaction job. + //highestWriteId will be NULL in upgrade scenarios s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " + "ctc_table = ?"; if (info.partName != null) { s += " and ctc_partition = ?"; } - if(info.highestTxnId != 0) { s += " and ctc_txnid <= ?"; } pStmt = dbConn.prepareStatement(s); @@ -381,8 +381,8 @@ public void markCleaned(CompactionInfo info) throws MetaException { if (info.partName != null) { pStmt.setString(paramCount++, info.partName); } - if(info.highestTxnId != 0) { - pStmt.setLong(paramCount++, info.highestTxnId); + if(info.highestWriteId != 0) { + pStmt.setLong(paramCount++, info.highestWriteId); } LOG.debug("Going to execute update <" + s + ">"); if (pStmt.executeUpdate() < 1) { @@ -392,15 +392,15 @@ public void markCleaned(CompactionInfo info) throws MetaException { s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' and tc_database = ? 
and tc_table = ?"; - if (info.highestTxnId != 0) s += " and txn_id <= ?"; + if (info.highestWriteId != 0) s += " and txn_id <= ?"; if (info.partName != null) s += " and tc_partition = ?"; pStmt = dbConn.prepareStatement(s); paramCount = 1; pStmt.setString(paramCount++, info.dbname); pStmt.setString(paramCount++, info.tableName); - if(info.highestTxnId != 0) { - pStmt.setLong(paramCount++, info.highestTxnId); + if(info.highestWriteId != 0) { + pStmt.setLong(paramCount++, info.highestWriteId); } if (info.partName != null) { pStmt.setString(paramCount++, info.partName); @@ -700,14 +700,14 @@ public void revokeTimedoutWorkers(long timeout) throws MetaException { */ @Override @RetrySemantics.Idempotent - public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException { + public void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException { Connection dbConn = null; Statement stmt = null; try { try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_TXN_ID = " + highestTxnId + + int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_WRITE_ID = " + highestWriteId + " WHERE CQ_ID = " + ci.id); if(updCount != 1) { throw new IllegalStateException("Could not find record in COMPACTION_QUEUE for " + ci); @@ -715,14 +715,14 @@ public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) thro dbConn.commit(); } catch (SQLException e) { rollbackDBConn(dbConn); - checkRetryable(dbConn, e, "setCompactionHighestTxnId(" + ci + "," + highestTxnId + ")"); + checkRetryable(dbConn, e, "setCompactionHighestWriteId(" + ci + "," + highestWriteId + ")"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { close(null, stmt, dbConn); } } catch (RetryException ex) { - setCompactionHighestTxnId(ci, highestTxnId); + 
setCompactionHighestWriteId(ci, highestWriteId); } } private static class RetentionCounters { @@ -932,7 +932,7 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, ci.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -966,7 +966,7 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho close(rs, stmt, null); closeStmt(pStmt); - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn)); int updCount = pStmt.executeUpdate(); LOG.debug("Going to commit"); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index e724723..1758a29 100644 --- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -96,6 +96,17 @@ public static void prepDb(Configuration conf) throws Exception { " CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL)"); stmt.execute("CREATE TABLE NEXT_TXN_ID (" + " NTXN_NEXT bigint NOT NULL)"); stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)"); + + stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" + + " T2W_TXNID bigint," + + " T2W_DATABASE varchar(128) NOT NULL," + + " T2W_TABLE varchar(256) NOT NULL," + + " T2W_WRITEID bigint NOT NULL)"); + stmt.execute("CREATE TABLE NEXT_WRITE_ID (" + + " NWI_DATABASE varchar(128) NOT NULL," + + " NWI_TABLE varchar(256) NOT NULL," + + " NWI_NEXT bigint NOT NULL)"); + stmt.execute("CREATE TABLE HIVE_LOCKS (" + " HL_LOCK_EXT_ID bigint NOT NULL," + " HL_LOCK_INT_ID bigint NOT NULL," + @@ -130,7 +141,7 @@ public static void prepDb(Configuration conf) throws Exception { " CQ_WORKER_ID varchar(128)," + " CQ_START bigint," + " CQ_RUN_AS varchar(128)," + - " CQ_HIGHEST_TXN_ID bigint," + + " CQ_HIGHEST_WRITE_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + " CQ_HADOOP_JOB_ID varchar(32))"); @@ -149,7 +160,7 @@ public static void prepDb(Configuration conf) throws Exception { " CC_START bigint," + " CC_END bigint," + " CC_RUN_AS varchar(128)," + - " CC_HIGHEST_TXN_ID bigint," + + " CC_HIGHEST_WRITE_ID bigint," + " CC_META_INFO varchar(2048) for bit data," + " CC_HADOOP_JOB_ID varchar(32))"); @@ -219,6 +230,8 @@ public static void cleanDb(Configuration conf) throws Exception { success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount); success &= dropTable(stmt, "TXNS", retryCount); success &= dropTable(stmt, "NEXT_TXN_ID", retryCount); + success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount); + success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount); success &= dropTable(stmt, "HIVE_LOCKS", retryCount); success 
&= dropTable(stmt, "NEXT_LOCK_ID", retryCount); success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 1bb976c..638d320 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -61,6 +61,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.DatabaseProduct; @@ -69,6 +70,8 @@ import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; @@ -80,6 +83,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; import 
org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; @@ -94,6 +99,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.OpenWriteIds; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; @@ -106,6 +112,7 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnOpenException; import org.apache.hadoop.hive.metastore.api.TxnState; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -869,6 +876,204 @@ public void commitTxn(CommitTxnRequest rqst) } @Override + @RetrySemantics.ReadOnly + public GetOpenWriteIdsResponse getOpenWriteIds(GetOpenWriteIdsRequest rqst) + throws NoSuchTxnException, MetaException { + try { + // We need to figure out the current transaction number and the list of + // open transactions. To avoid needing a transaction on the underlying + // database we'll look at the current transaction number first. If it + // subsequently shows up in the open list that's ok. 
+ Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + ValidTxnList validTxnList; + if (rqst.isSetValidTxnStr()) { + validTxnList = new ValidReadTxnList(rqst.getValidTxnStr()); + } else { + validTxnList = TxnUtils.createValidReadTxnList(getOpenTxns(), 0); + } + try { + /** + * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} + */ + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + + List openWriteIdsList = new ArrayList<>(); + for (String fullTableName : rqst.getTableNames()) { + OpenWriteIds writeIds = getOpenWriteIdsForTable(stmt, fullTableName, validTxnList); + openWriteIdsList.add(writeIds); + } + + LOG.debug("Going to rollback"); + dbConn.rollback(); + GetOpenWriteIdsResponse owr = new GetOpenWriteIdsResponse(openWriteIdsList); + return owr; + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "getOpenWriteIds"); + throw new MetaException("Unable to select from transaction database, " + + StringUtils.stringifyException(e)); + } finally { + close(null, stmt, dbConn); + } + } catch (RetryException e) { + return getOpenWriteIds(rqst); + } + } + + private OpenWriteIds getOpenWriteIdsForTable(Statement stmt, String fullTableName, + ValidTxnList validTxnList) throws SQLException { + ResultSet rs = null; + String[] names = TxnUtils.getDbTableName(fullTableName); + try { + // Need to initialize to 0 to make sure if nobody modified this table, then current txn + // shouldn't read any data + long writeIdHwm = 0; + List openWriteIdList = new ArrayList<>(); + long txnHwm = validTxnList.getHighWatermark(); + + // The output includes all the txns which are under the high water mark. It includes + // the committed transactions as well. 
+ String s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm + + " and t2w_database = " + quoteString(names[0]) + + " and t2w_table = " + quoteString(names[1]) + + " order by t2w_writeid"; + LOG.debug("Going to execute query<" + s + ">"); + rs = stmt.executeQuery(s); + long minOpenWriteId = Long.MAX_VALUE; + BitSet abortedBits = new BitSet(); + while (rs.next()) { + long txnId = rs.getLong(1); + long writeId = rs.getLong(2); + writeIdHwm = Math.max(writeIdHwm, writeId); + if (validTxnList.isTxnValid(txnId)) { + // Skip if the transaction under evaluation is already committed. + continue; + } + + // The current txn is either in open or aborted state. + // Mark the write ids state as per the txn state. + if (validTxnList.isTxnAborted(txnId)) { + openWriteIdList.add(writeId); + abortedBits.set(openWriteIdList.size() - 1); + } else { + openWriteIdList.add(writeId); + minOpenWriteId = Math.min(minOpenWriteId, writeId); + } + } + + ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray()); + OpenWriteIds owi = new OpenWriteIds(fullTableName, writeIdHwm, openWriteIdList, byteBuffer); + if (minOpenWriteId < Long.MAX_VALUE) { + owi.setMinWriteId(minOpenWriteId); + } + return owi; + } finally { + close(rs); + } + } + + @Override + public AllocateTableWriteIdResponse allocateTableWriteId(AllocateTableWriteIdRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException { + List txnIds = rqst.getTxnIds(); + String dbName = rqst.getDbName().toLowerCase(); + String tblName = rqst.getTableName().toLowerCase(); + try { + Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + try { + lockInternal(); + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + List txnToWriteIds = new ArrayList<>(); + List newAllocTxns = new ArrayList<>(); + String s; + long writeId; + + for (long txnId : txnIds) { + // Validate the transaction's state.
Write ID should be allocated only for open transactions + TxnStatus txnStatus = findTxnState(txnId, stmt); + if (txnStatus != TxnStatus.OPEN) { + raiseTxnUnexpectedState(txnStatus, txnId); + shouldNeverHappen(txnId); + //dbConn is rolled back in finally{} + } + + // If table write ID is already allocated for the current transaction, then just return it + // else allocate it + s = "select t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid = " + txnId + + " and t2w_database = " + quoteString(dbName) + + " and t2w_table = " + quoteString(tblName); + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (rs.next()) { + writeId = rs.getLong(1); + txnToWriteIds.add(new TxnToWriteId(txnId, writeId)); + } else { + newAllocTxns.add(txnId); + } + } + + // If all the txns in the list have already allocated write ids, then just skip new allocations + long numOfWriteIds = newAllocTxns.size(); + if (0 == numOfWriteIds) { + return new AllocateTableWriteIdResponse(txnToWriteIds); + } + + // Get the next write ID for the given table and increment it + s = sqlGenerator.addForUpdateClause( + "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName) + + " and nwi_table = " + quoteString(tblName)); + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (!rs.next()) { + // First allocation of write id should add the table to the next_write_id meta table + s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values (" + + quoteString(dbName) + "," + quoteString(tblName) + "," + String.valueOf(numOfWriteIds + 1) + ")"; + LOG.debug("Going to execute insert <" + s + ">"); + stmt.execute(s); + writeId = 1; + } else { + writeId = rs.getLong(1); + s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds) + + " where nwi_database = " + quoteString(dbName) + + " and nwi_table = " + quoteString(tblName); + LOG.debug("Going to execute update <" + s + ">"); + stmt.executeUpdate(s); + } + + for 
(long txnId : newAllocTxns) { + s = "insert into TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid) values (" + + txnId + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId + ")"; + LOG.debug("Going to execute insert <" + s + ">"); + stmt.execute(s); + txnToWriteIds.add(new TxnToWriteId(txnId, writeId++)); + } + + LOG.debug("Going to commit"); + dbConn.commit(); + return new AllocateTableWriteIdResponse(txnToWriteIds); + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "allocateTableWriteId(" + rqst + ")"); + throw new MetaException("Unable to update transaction database " + + StringUtils.stringifyException(e)); + } finally { + close(rs, stmt, dbConn); + unlockInternal(); + } + } catch (RetryException e) { + return allocateTableWriteId(rqst); + } + } + + @Override @RetrySemantics.SafeToRetry public void performWriteSetGC() { Connection dbConn = null; @@ -1906,6 +2111,18 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, buff.append("'"); queries.add(buff.toString()); + buff.setLength(0); + buff.append("delete from TXN_TO_WRITE_ID where t2w_database='"); + buff.append(dbName.toLowerCase()); + buff.append("'"); + queries.add(buff.toString()); + + buff.setLength(0); + buff.append("delete from NEXT_WRITE_ID where nwi_database='"); + buff.append(dbName.toLowerCase()); + buff.append("'"); + queries.add(buff.toString()); + break; case TABLE: dbName = table.getDbName(); @@ -1942,6 +2159,22 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, buff.append("'"); queries.add(buff.toString()); + buff.setLength(0); + buff.append("delete from TXN_TO_WRITE_ID where t2w_database='"); + buff.append(dbName.toLowerCase()); + buff.append("' and t2w_table='"); + buff.append(tblName.toLowerCase()); + buff.append("'"); + queries.add(buff.toString()); + + buff.setLength(0); + buff.append("delete from NEXT_WRITE_ID where nwi_database='"); 
+ buff.append(dbName.toLowerCase()); + buff.append("' and nwi_table='"); + buff.append(tblName.toLowerCase()); + buff.append("'"); + queries.add(buff.toString()); + break; case PARTITION: dbName = table.getDbName(); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 3e27034..a541f7d 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.api.*; import java.sql.SQLException; +import java.util.IllegalFormatCodePointException; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -123,6 +124,25 @@ void commitTxn(CommitTxnRequest rqst) public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( String inputDbName, String inputTableName, ValidTxnList txnList) throws MetaException; + /** + * Gets the list of write ids which are open/aborted + * @param rqst info on transaction and list of table names associated with given transaction + * @throws NoSuchTxnException + * @throws MetaException + */ + @RetrySemantics.ReadOnly + GetOpenWriteIdsResponse getOpenWriteIds(GetOpenWriteIdsRequest rqst) + throws NoSuchTxnException, MetaException; + + /** + * Allocate a write ID for the given table and associate it with a transaction + * @param rqst info on transaction and table to allocate write id + * @throws NoSuchTxnException + * @throws TxnAbortedException + * @throws MetaException + */ + AllocateTableWriteIdResponse allocateTableWriteId(AllocateTableWriteIdRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException; /** * Obtain a lock. 
@@ -350,10 +370,10 @@ void cleanupRecords(HiveObjectType type, Database db, Table table, List findColumnsWithStats(CompactionInfo ci) throws MetaException; /** - * Record the highest txn id that the {@code ci} compaction job will pay attention to. + * Record the highest write id that the {@code ci} compaction job will pay attention to. */ @RetrySemantics.Idempotent - void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException; + void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException; /** * For any given compactable entity (partition, table if not partitioned) the history of compactions diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index 027fb3f..6f7818a 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -18,16 +18,14 @@ package org.apache.hadoop.hive.metastore.txn; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TxnInfo; -import org.apache.hadoop.hive.metastore.api.TxnState; -import 
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -63,53 +61,84 @@ public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long BitSet abortedBits = BitSet.valueOf(txns.getAbortedBits()); long[] exceptions = new long[open.size() - (currentTxn > 0 ? 1 : 0)]; int i = 0; - for(long txn: open) { + for (long txn : open) { if (currentTxn > 0 && currentTxn == txn) continue; exceptions[i++] = txn; } - if(txns.isSetMin_open_txn()) { + if (txns.isSetMin_open_txn()) { return new ValidReadTxnList(exceptions, abortedBits, highWater, txns.getMin_open_txn()); - } - else { + } else { return new ValidReadTxnList(exceptions, abortedBits, highWater); } } /** - * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse} to a - * {@link org.apache.hadoop.hive.common.ValidTxnList}. This assumes that the caller intends to + * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsResponse} to a + * {@link org.apache.hadoop.hive.common.ValidTxnWriteIdList}. This assumes that the caller intends to + * read the files, and thus treats both open and aborted transactions as invalid. + * @param writeIds write ids list from the metastore + * @return a valid write IDs list for the whole transaction. 
+ */ + public static ValidTxnWriteIdList createValidTxnWriteIdList(GetOpenWriteIdsResponse writeIds) { + ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(); + for (OpenWriteIds tableWriteIds : writeIds.getOpenWriteIds()) { + validTxnWriteIdList.addTableWriteId(createValidReaderWriteIdList(tableWriteIds)); + } + return validTxnWriteIdList; + } + + public static ValidReaderWriteIdList createValidReaderWriteIdList(OpenWriteIds tableWriteIds) { + String tableName = tableWriteIds.getTableName(); + long highWater = tableWriteIds.getWriteIdHighWaterMark(); + List open = tableWriteIds.getOpenWriteIds(); + BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits()); + long[] exceptions = new long[open.size()]; + int i = 0; + for (long writeId : open) { + exceptions[i++] = writeId; + } + if (tableWriteIds.isSetMinWriteId()) { + return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, highWater, tableWriteIds.getMinWriteId()); + } else { + return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, highWater); + } + } + + /** + * Transform a {@link org.apache.hadoop.hive.metastore.api.OpenWriteIds} to a + * {@link org.apache.hadoop.hive.common.ValidWriteIdList}. This assumes that the caller intends to * compact the files, and thus treats only open transactions as invalid. Additionally any - * txnId > highestOpenTxnId is also invalid. This is to avoid creating something like - * delta_17_120 where txnId 80, for example, is still open. - * @param txns txn list from the metastore - * @return a valid txn list. + * writeId > highestOpenWriteId is also invalid. This is to avoid creating something like + * delta_17_120 where writeId 80, for example, is still open. + * @param tableWriteIds table write id list from the metastore + * @return a valid write id list. 
*/ - public static ValidTxnList createValidCompactTxnList(GetOpenTxnsInfoResponse txns) { - //highWater is the last txn id that has been allocated - long highWater = txns.getTxn_high_water_mark(); - long minOpenTxn = Long.MAX_VALUE; - long[] exceptions = new long[txns.getOpen_txnsSize()]; + public static ValidWriteIdList createValidCompactWriteIdList(OpenWriteIds tableWriteIds) { + String tableName = tableWriteIds.getTableName(); + long highWater = tableWriteIds.getWriteIdHighWaterMark(); + long minOpenWriteId = Long.MAX_VALUE; + List open = tableWriteIds.getOpenWriteIds(); + BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits()); + long[] exceptions = new long[open.size()]; int i = 0; - for (TxnInfo txn : txns.getOpen_txns()) { - if (txn.getState() == TxnState.OPEN) { - minOpenTxn = Math.min(minOpenTxn, txn.getId()); - } - else { - //only need aborted since we don't consider anything above minOpenTxn - exceptions[i++] = txn.getId(); + for (long writeId : open) { + if (abortedBits.get(i)) { + // Only need aborted since we don't consider anything above minOpenWriteId + exceptions[i++] = writeId; + } else { + minOpenWriteId = Math.min(minOpenWriteId, writeId); } } if(i < exceptions.length) { exceptions = Arrays.copyOf(exceptions, i); } - highWater = minOpenTxn == Long.MAX_VALUE ? highWater : minOpenTxn - 1; + highWater = minOpenWriteId == Long.MAX_VALUE ? 
highWater : minOpenWriteId - 1; BitSet bitSet = new BitSet(exceptions.length); - bitSet.set(0, exceptions.length); // for ValidCompactorTxnList, everything in exceptions are aborted - if(minOpenTxn == Long.MAX_VALUE) { - return new ValidCompactorTxnList(exceptions, bitSet, highWater); - } - else { - return new ValidCompactorTxnList(exceptions, bitSet, highWater, minOpenTxn); + bitSet.set(0, exceptions.length); // for ValidCompactorWriteIdList, everything in exceptions are aborted + if (minOpenWriteId == Long.MAX_VALUE) { + return new ValidCompactorWriteIdList(tableName, exceptions, bitSet, highWater); + } else { + return new ValidCompactorWriteIdList(tableName, exceptions, bitSet, highWater, minOpenWriteId); } } @@ -157,6 +186,20 @@ public static boolean isAcidTable(Table table) { } /** + * Should produce the same result as + * {@link org.apache.hadoop.hive.ql.io.AcidUtils#getFullTableName(String, String)} + */ + public static String getFullTableName(String dbName, String tableName) { + return dbName.toLowerCase() + "." + tableName.toLowerCase(); + } + + public static String[] getDbTableName(String fullTableName) { + return fullTableName.split("\\."); + } + + + + /** * Build a query (or queries if one query is too big but only for the case of 'IN' * composite clause. For the case of 'NOT IN' clauses, multiple queries change * the semantics of the intended query. 
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift index 371b975..90f47bb 100644 --- a/standalone-metastore/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -731,6 +731,38 @@ struct CommitTxnRequest { 1: required i64 txnid, } +struct GetOpenWriteIdsRequest { + 1: required list tableNames, + 2: required string validTxnStr, +} + +struct OpenWriteIds { + 1: required string tableName, + 2: required i64 writeIdHighWaterMark, + 3: required list openWriteIds, + 4: optional i64 minWriteId, + 5: required binary abortedBits, +} + +struct GetOpenWriteIdsResponse { + 1: required list openWriteIds, +} + +struct AllocateTableWriteIdRequest { + 1: required list txnIds, + 2: required string dbName, + 3: required string tableName, +} + +struct TxnToWriteId { + 1: required i64 txnId, + 2: required i64 writeId, +} + +struct AllocateTableWriteIdResponse { + 1: required list txnToWriteIds, +} + struct LockComponent { 1: required LockType type, 2: required LockLevel level, @@ -1805,6 +1837,10 @@ service ThriftHiveMetastore extends fb303.FacebookService void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1) void abort_txns(1:AbortTxnsRequest rqst) throws (1:NoSuchTxnException o1) void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2) + GetOpenWriteIdsResponse get_open_write_ids(1:GetOpenWriteIdsRequest rqst) + throws (1:NoSuchTxnException o1, 2:MetaException o2) + AllocateTableWriteIdResponse allocate_table_write_id(1:AllocateTableWriteIdRequest rqst) + throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:MetaException o3) LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2) LockResponse check_lock(1:CheckLockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3) diff --git 
a/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java similarity index 53% rename from storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java rename to storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java index 94b8c58..1fcfc6a 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java @@ -22,35 +22,38 @@ import java.util.BitSet; /** - * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by the compactor. - * + * An implementation of {@link ValidWriteIdList} for use by the compactor. + * * Compaction should only include txns up to smallest open txn (exclussive). - * There may be aborted txns in the snapshot represented by this ValidCompactorTxnList. - * Thus {@link #isTxnRangeValid(long, long)} returns NONE for any range that inluces any unresolved - * transactions. Any txn above {@code highWatermark} is unresolved. + * There may be aborted write ids in the snapshot represented by this ValidCompactorWriteIdList. + * Thus {@link #isWriteIdRangeValid(long, long)} returns NONE for any range that includes any unresolved + * write ids. Any write id above {@code highWatermark} is unresolved. * These produce the logic we need to assure that the compactor only sees records less than the lowest - * open transaction when choosing which files to compact, but that it still ignores aborted + * open write ids when choosing which files to compact, but that it still ignores aborted * records when compacting. - * + * * See org.apache.hadoop.hive.metastore.txn.TxnUtils#createValidCompactTxnList() for proper * way to construct this. 
+   * @param tableName table which is under compaction. Full name of format <db_name>.<table_name>
+   * @param minOpenWriteId minimum write ID which maps to an open transaction
RangeResponse.ALL : RangeResponse.NONE; } @Override - public boolean isTxnAborted(long txnid) { - return Arrays.binarySearch(exceptions, txnid) >= 0; + public boolean isWriteIdAborted(long writeId) { + return Arrays.binarySearch(exceptions, writeId) >= 0; } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java index ccdd4b7..1433e8e 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java @@ -65,14 +65,6 @@ public boolean isTxnValid(long txnid) { return Arrays.binarySearch(exceptions, txnid) < 0; } - /** - * We cannot use a base file if its range contains an open txn. - * @param txnid from base_xxxx - */ - @Override - public boolean isValidBase(long txnid) { - return minOpenTxn > txnid && txnid <= highWatermark; - } @Override public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) { // check the easy cases first diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java new file mode 100644 index 0000000..9e1fcc3 --- /dev/null +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
+  protected String tableName; // Full table name of format <db_name>.<table_name>
Bit is true if aborted, false if open + //default value means there are no open write ids in the snapshot + private long minOpenWriteId = Long.MAX_VALUE; + protected long highWatermark; + + public ValidReaderWriteIdList() { + this(null, new long[0], new BitSet(), Long.MAX_VALUE, Long.MAX_VALUE); + } + + /** + * Used if there are no open write ids in the snapshot + */ + public ValidReaderWriteIdList(String tableName, long[] exceptions, BitSet abortedBits, long highWatermark) { + this(tableName, exceptions, abortedBits, highWatermark, Long.MAX_VALUE); + } + public ValidReaderWriteIdList(String tableName, + long[] exceptions, BitSet abortedBits, long highWatermark, long minOpenWriteId) { + this.tableName = tableName; + if (exceptions.length > 0) { + this.minOpenWriteId = minOpenWriteId; + } + this.exceptions = exceptions; + this.abortedBits = abortedBits; + this.highWatermark = highWatermark; + } + + public ValidReaderWriteIdList(String value) { + readFromString(value); + } + + @Override + public boolean isWriteIdValid(long writeId) { + if (highWatermark < writeId) { + return false; + } + return Arrays.binarySearch(exceptions, writeId) < 0; + } + + /** + * We cannot use a base file if its range contains an open write id. 
+  // Format is <tableName>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>
Long.MAX_VALUE; + exceptions = new long[0]; + abortedBits = new BitSet(); + } else { + String[] values = src.split(":"); + tableName = values[0]; + if (tableName.equalsIgnoreCase("null")) { + tableName = null; + } + highWatermark = Long.parseLong(values[1]); + minOpenWriteId = Long.parseLong(values[2]); + String[] openWriteIds = new String[0]; + String[] abortedWriteIds = new String[0]; + if (values.length < 4) { + openWriteIds = new String[0]; + abortedWriteIds = new String[0]; + } else if (values.length == 4) { + if (!values[3].isEmpty()) { + openWriteIds = values[3].split(","); + } + } else { + if (!values[3].isEmpty()) { + openWriteIds = values[3].split(","); + } + if (!values[4].isEmpty()) { + abortedWriteIds = values[4].split(","); + } + } + exceptions = new long[openWriteIds.length + abortedWriteIds.length]; + int i = 0; + for (String open : openWriteIds) { + exceptions[i++] = Long.parseLong(open); + } + for (String abort : abortedWriteIds) { + exceptions[i++] = Long.parseLong(abort); + } + Arrays.sort(exceptions); + abortedBits = new BitSet(exceptions.length); + for (String abort : abortedWriteIds) { + int index = Arrays.binarySearch(exceptions, Long.parseLong(abort)); + abortedBits.set(index); + } + } + } + + @Override + public String getTableName() { + return tableName; + } + + @Override + public long getHighWatermark() { + return highWatermark; + } + + @Override + public long[] getInvalidWriteIds() { + return exceptions; + } + + @Override + public Long getMinOpenWriteId() { + return minOpenWriteId == Long.MAX_VALUE ? 
+    int count = 0; // number of aborted write ids found in exceptions
+
+    // traverse the aborted write ids list, starting at first aborted write id index
+ * An implementation to store and manage the list of ValidWriteIds for each table read by the
+ * current transaction
+  private HashMap<String, ValidWriteIdList> validTablesWriteIdList = new HashMap<>();
+ * Models the list of write ids that should be included in a snapshot of a table.
+ * It is modelled as a high water mark, which is the largest write id that
+ * has been committed, and a list of write ids that are not included.
+   * @return table name (<db_name>.<table_name>) associated with ValidWriteIdList.
+ * @param minWriteId minimum write Id to look for, inclusive + * @param maxWriteId maximum write Id to look for, inclusive + * @return Indicate whether none, some, or all of these write ids are aborted. + */ + public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId); + + /** + * Returns smallest Open write Id in this set, {@code null} if there is none. + */ + Long getMinOpenWriteId(); +} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java deleted file mode 100644 index 867b652..0000000 --- a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.common; - -import org.junit.Assert; -import org.junit.Test; - -import java.util.BitSet; - -public class TestValidCompactorTxnList { - - @Test - public void minTxnHigh() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 4}, bitSet, 2); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void maxTxnLow() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{13, 14}, bitSet, 12); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - } - - @Test - public void minTxnHighNoExceptions() { - ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 5); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void maxTxnLowNoExceptions() { - ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 15); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - } - - @Test - public void exceptionsAllBelow() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 6}, bitSet, 3); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void exceptionsInMidst() { - BitSet bitSet = new BitSet(1); - bitSet.set(0, 1); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{8}, bitSet, 7); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - @Test - public void exceptionsAbveHighWaterMark() { - BitSet bitSet = new BitSet(4); - bitSet.set(0, 4); - 
ValidTxnList txns = new ValidCompactorTxnList(new long[]{8, 11, 17, 29}, bitSet, 15); - Assert.assertArrayEquals("", new long[]{8, 11}, txns.getInvalidTransactions()); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - rsp = txns.isTxnRangeValid(12, 16); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void writeToString() { - BitSet bitSet = new BitSet(4); - bitSet.set(0, 4); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{7, 9, 10, Long.MAX_VALUE}, bitSet, 8); - Assert.assertEquals("8:" + Long.MAX_VALUE + "::7", txns.writeToString()); - txns = new ValidCompactorTxnList(); - Assert.assertEquals(Long.toString(Long.MAX_VALUE) + ":" + Long.MAX_VALUE + "::", txns.writeToString()); - txns = new ValidCompactorTxnList(new long[0], new BitSet(), 23); - Assert.assertEquals("23:" + Long.MAX_VALUE + "::", txns.writeToString()); - } - - @Test - public void readFromString() { - ValidCompactorTxnList txns = new ValidCompactorTxnList("37:" + Long.MAX_VALUE + "::7,9,10"); - Assert.assertEquals(37L, txns.getHighWatermark()); - Assert.assertNull(txns.getMinOpenTxn()); - Assert.assertArrayEquals(new long[]{7L, 9L, 10L}, txns.getInvalidTransactions()); - txns = new ValidCompactorTxnList("21:" + Long.MAX_VALUE + ":"); - Assert.assertEquals(21L, txns.getHighWatermark()); - Assert.assertNull(txns.getMinOpenTxn()); - Assert.assertEquals(0, txns.getInvalidTransactions().length); - } - - @Test - public void testAbortedTxn() throws Exception { - ValidCompactorTxnList txnList = new ValidCompactorTxnList("5:4::1,2,3"); - Assert.assertEquals(5L, txnList.getHighWatermark()); - Assert.assertEquals(4, txnList.getMinOpenTxn().longValue()); - Assert.assertArrayEquals(new long[]{1L, 2L, 3L}, txnList.getInvalidTransactions()); - } - - @Test - public void testAbortedRange() throws Exception { - ValidCompactorTxnList txnList = new ValidCompactorTxnList("11:4::5,6,7,8"); - 
ValidTxnList.RangeResponse rsp = txnList.isTxnRangeAborted(1L, 3L); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - rsp = txnList.isTxnRangeAborted(9L, 10L); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - rsp = txnList.isTxnRangeAborted(6L, 7L); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - rsp = txnList.isTxnRangeAborted(4L, 6L); - Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp); - rsp = txnList.isTxnRangeAborted(6L, 13L); - Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp); - } -} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java new file mode 100644 index 0000000..9530edb --- /dev/null +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.common; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.BitSet; + +public class TestValidCompactorWriteIdList { + private final String tableName = "t1"; + + @Test + public void minTxnHigh() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{3, 4}, bitSet, 2); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void maxTxnLow() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{13, 14}, bitSet, 12); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + } + + @Test + public void minTxnHighNoExceptions() { + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[0], new BitSet(), 5); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void maxTxnLowNoExceptions() { + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[0], new BitSet(), 15); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + } + + @Test + public void exceptionsAllBelow() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{3, 6}, bitSet, 3); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void exceptionsInMidst() { + BitSet bitSet = new BitSet(1); + bitSet.set(0, 1); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, 
+  public void exceptionsAboveHighWaterMark() {
Assert.assertEquals(0, writeIds.getInvalidWriteIds().length); + } + + @Test + public void testAbortedTxn() throws Exception { + ValidCompactorWriteIdList writeIdList = new ValidCompactorWriteIdList(tableName + ":5:4::1,2,3"); + Assert.assertEquals(5L, writeIdList.getHighWatermark()); + Assert.assertEquals(4, writeIdList.getMinOpenWriteId().longValue()); + Assert.assertArrayEquals(new long[]{1L, 2L, 3L}, writeIdList.getInvalidWriteIds()); + } + + @Test + public void testAbortedRange() throws Exception { + ValidCompactorWriteIdList writeIdList = new ValidCompactorWriteIdList(tableName + ":11:4::5,6,7,8"); + ValidWriteIdList.RangeResponse rsp = writeIdList.isWriteIdRangeAborted(1L, 3L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + rsp = writeIdList.isWriteIdRangeAborted(9L, 10L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + rsp = writeIdList.isWriteIdRangeAborted(6L, 7L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + rsp = writeIdList.isWriteIdRangeAborted(4L, 6L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.SOME, rsp); + rsp = writeIdList.isWriteIdRangeAborted(6L, 13L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.SOME, rsp); + } +} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java new file mode 100644 index 0000000..68ffb44 --- /dev/null +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.common;

// NOTE(review): was junit.framework.Assert (deprecated JUnit 3 class); this file
// uses JUnit 4 (@Test), so the JUnit 4 org.junit.Assert is the correct import.
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.BitSet;

/**
 * Tests for {@link ValidReaderWriteIdList}: serialization round-trips and
 * validity/aborted queries on a reader's view of table write ids.
 */
public class TestValidReaderWriteIdList {
  private final String tableName = "t1";

  /** No exceptions: everything up to the high watermark (1) is valid. */
  @Test
  public void noExceptions() throws Exception {
    ValidWriteIdList writeIdList =
        new ValidReaderWriteIdList(tableName, new long[0], new BitSet(), 1, Long.MAX_VALUE);
    String str = writeIdList.writeToString();
    Assert.assertEquals(tableName + ":1:" + Long.MAX_VALUE + "::", str);
    ValidWriteIdList newList = new ValidReaderWriteIdList();
    newList.readFromString(str);
    Assert.assertTrue(newList.isWriteIdValid(1));
    Assert.assertFalse(newList.isWriteIdValid(2));
  }

  /** Exceptions 2 and 4 (min open 4) stay invalid after a serialize/parse round trip. */
  @Test
  public void exceptions() throws Exception {
    ValidWriteIdList writeIdList =
        new ValidReaderWriteIdList(tableName, new long[]{2L, 4L}, new BitSet(), 5, 4L);
    String str = writeIdList.writeToString();
    Assert.assertEquals(tableName + ":5:4:2,4:", str);
    ValidWriteIdList newList = new ValidReaderWriteIdList();
    newList.readFromString(str);
    Assert.assertTrue(newList.isWriteIdValid(1));
    Assert.assertFalse(newList.isWriteIdValid(2));
    Assert.assertTrue(newList.isWriteIdValid(3));
    Assert.assertFalse(newList.isWriteIdValid(4));
    Assert.assertTrue(newList.isWriteIdValid(5));
    Assert.assertFalse(newList.isWriteIdValid(6));
  }

  /**
   * Round trip with 1000 exceptions — presumably enough to trigger the
   * compressed string encoding (see test name); verify validity on both
   * sides of and inside the exception range.
   */
  @Test
  public void longEnoughToCompress() throws Exception {
    long[] exceptions = new long[1000];
    for (int i = 0; i < 1000; i++) {
      exceptions[i] = i + 100;
    }
    ValidWriteIdList writeIdList =
        new ValidReaderWriteIdList(tableName, exceptions, new BitSet(), 2000, 900);
    String str = writeIdList.writeToString();
    ValidWriteIdList newList = new ValidReaderWriteIdList();
    newList.readFromString(str);
    for (int i = 0; i < 100; i++) {
      Assert.assertTrue(newList.isWriteIdValid(i));
    }
    for (int i = 100; i < 1100; i++) {
      Assert.assertFalse(newList.isWriteIdValid(i));
    }
    for (int i = 1100; i < 2001; i++) {
      Assert.assertTrue(newList.isWriteIdValid(i));
    }
    Assert.assertFalse(newList.isWriteIdValid(2001));
  }

  /**
   * The serialized list survives a Hadoop Configuration write/read cycle.
   * Fixed: the original never closed the input stream (and closed the output
   * stream only on the success path) and leaked the temp file; streams now use
   * try-with-resources and the file is removed on exit.
   */
  @Test
  public void readWriteConfig() throws Exception {
    long[] exceptions = new long[1000];
    for (int i = 0; i < 1000; i++) {
      exceptions[i] = i + 100;
    }
    ValidWriteIdList writeIdList =
        new ValidReaderWriteIdList(tableName, exceptions, new BitSet(), 2000, 900);
    String str = writeIdList.writeToString();
    Configuration conf = new Configuration();
    conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, str);
    File tmpFile = File.createTempFile("TestValidTxnImpl", "readWriteConfig");
    tmpFile.deleteOnExit(); // don't leak the temp file even if an assertion fails
    try (DataOutputStream out = new DataOutputStream(new FileOutputStream(tmpFile))) {
      conf.write(out);
    }
    Configuration newConf = new Configuration();
    try (DataInputStream in = new DataInputStream(new FileInputStream(tmpFile))) {
      newConf.readFields(in);
    }
    Assert.assertEquals(str, newConf.get(ValidWriteIdList.VALID_WRITEIDS_KEY));
  }

  /**
   * Aborted write ids (bits set in the BitSet) serialize into the last field,
   * open ones into the second-to-last, and isWriteIdAborted distinguishes them.
   */
  @Test
  public void testAbortedTxn() throws Exception {
    long[] exceptions = {2L, 4L, 6L, 8L, 10L};
    BitSet bitSet = new BitSet(exceptions.length);
    bitSet.set(0); // mark write id 2L aborted
    bitSet.set(3); // mark write id 8L aborted
    ValidWriteIdList writeIdList =
        new ValidReaderWriteIdList(tableName, exceptions, bitSet, 11, 4L);
    String str = writeIdList.writeToString();
    Assert.assertEquals(tableName + ":11:4:4,6,10:2,8", str);
    Assert.assertTrue(writeIdList.isWriteIdAborted(2L));
    Assert.assertFalse(writeIdList.isWriteIdAborted(4L));
    Assert.assertFalse(writeIdList.isWriteIdAborted(6L));
    Assert.assertTrue(writeIdList.isWriteIdAborted(8L));
    Assert.assertFalse(writeIdList.isWriteIdAborted(10L));
  }
}