diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java index 9f64b3d..4f95d88 100644 --- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java @@ -158,6 +158,10 @@ public static String txnIdToString(long txnId) { return "txnid:" + txnId; } + public static String writeIdToString(long writeId) { + return "writeid:" + writeId; + } + public static String txnIdsToString(List txnIds) { return "Transactions requested to be aborted: " + txnIds.toString(); } @@ -166,7 +170,7 @@ private JavaUtils() { // prevent instantiation } - public static Long extractTxnId(Path file) { + public static Long extractWriteId(Path file) { String fileName = file.getName(); String[] parts = fileName.split("_", 4); // e.g. delta_0000001_0000001_0000 or base_0000022 if (parts.length < 2 || !(DELTA_PREFIX.equals(parts[0]) || BASE_PREFIX.equals(parts[0]))) { diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 99e8457..a312224 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1093,7 +1093,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false, "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"), HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", - "hive.txn.valid.txns,hive.script.operator.env.blacklist", + "hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist", "Comma separated list of keys from the configuration file not to convert to environment " + "variables when invoking the script operator"), HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false, diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java index 4ec10ad..924e233 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java @@ -73,8 +73,8 @@ private final AcidOutputFormat outf; private Object[] bucketFieldData; // Pre-allocated in constructor. Updated on each write. 
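For context on the JavaUtils.extractTxnId -> extractWriteId rename above: the method pulls a write ID out of ACID directory names such as delta_0000001_0000001_0000 or base_0000022. A minimal standalone sketch of that parsing follows; the literal "delta"/"base" prefixes and the parse of the second token are assumptions, since the hunk ends before the method's return statement, and parseWriteId is a hypothetical stand-in rather than the Hive method itself.

    // Illustrative sketch only; parseWriteId is a hypothetical stand-in for JavaUtils.extractWriteId.
    public final class WriteIdParseSketch {
      static Long parseWriteId(String dirName) {
        String[] parts = dirName.split("_", 4);   // e.g. delta_0000001_0000001_0000 or base_0000022
        if (parts.length < 2
            || !("delta".equals(parts[0]) || "base".equals(parts[0]))) { // prefixes assumed ("delta"/"base")
          return null;                             // not an ACID delta/base directory
        }
        return Long.parseLong(parts[1]);           // first (minimum) write ID encoded in the name
      }

      public static void main(String[] args) {
        System.out.println(parseWriteId("delta_0000001_0000001_0000")); // 1
        System.out.println(parseWriteId("base_0000022"));               // 22
      }
    }
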
- private Long curBatchMinTxnId; - private Long curBatchMaxTxnId; + private Long curBatchMinWriteId; + private Long curBatchMaxWriteId; private static final class TableWriterPair { private final Table tbl; @@ -143,7 +143,7 @@ public TableWriterPair run() throws Exception { * used to tag error msgs to provied some breadcrumbs */ String getWatermark() { - return partitionPath + " txnIds[" + curBatchMinTxnId + "," + curBatchMaxTxnId + "]"; + return partitionPath + " writeIds[" + curBatchMinWriteId + "," + curBatchMaxWriteId + "]"; } // return the column numbers of the bucketed columns private List getBucketColIDs(List bucketCols, List cols) { @@ -207,15 +207,15 @@ public void clear() throws StreamingIOFailure { /** * Creates a new record updater for the new batch - * @param minTxnId smallest Txnid in the batch - * @param maxTxnID largest Txnid in the batch + * @param minWriteId smallest writeid in the batch + * @param maxWriteID largest writeid in the batch * @throws StreamingIOFailure if failed to create record updater */ @Override - public void newBatch(Long minTxnId, Long maxTxnID) + public void newBatch(Long minWriteId, Long maxWriteID) throws StreamingIOFailure, SerializationError { - curBatchMinTxnId = minTxnId; - curBatchMaxTxnId = maxTxnID; + curBatchMinWriteId = minWriteId; + curBatchMaxWriteId = maxWriteID; updaters = new ArrayList(totalBuckets); for (int bucket = 0; bucket < totalBuckets; bucket++) { updaters.add(bucket, null);//so that get(i) returns null rather than ArrayOutOfBounds @@ -265,7 +265,7 @@ public void closeBatch() throws StreamingIOFailure { return bucketFieldData; } - private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxTxnID) + private RecordUpdater createRecordUpdater(int bucketId, Long minWriteId, Long maxWriteID) throws IOException, SerializationError { try { // Initialize table properties from the table parameters. 
This is required because the table @@ -278,8 +278,8 @@ private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxT .inspector(getSerde().getObjectInspector()) .bucket(bucketId) .tableProperties(tblProperties) - .minimumTransactionId(minTxnId) - .maximumTransactionId(maxTxnID) + .minimumWriteId(minWriteId) + .maximumWriteId(maxWriteID) .statementId(-1) .finalDestination(partitionPath)); } catch (SerDeException e) { @@ -292,7 +292,7 @@ RecordUpdater getRecordUpdater(int bucketId) throws StreamingIOFailure, Serializ RecordUpdater recordUpdater = updaters.get(bucketId); if (recordUpdater == null) { try { - recordUpdater = createRecordUpdater(bucketId, curBatchMinTxnId, curBatchMaxTxnId); + recordUpdater = createRecordUpdater(bucketId, curBatchMinWriteId, curBatchMaxWriteId); } catch (IOException e) { String errMsg = "Failed creating RecordUpdater for " + getWatermark(); LOG.error(errMsg, e); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java index 0a5492c..999c37e 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java @@ -255,16 +255,16 @@ private static boolean areFieldsInColOrder(int[] fieldToColMapping) { } @Override - public void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws SerializationError, StreamingIOFailure { try { byte[] orderedFields = reorderFields(record); Object encodedRow = encode(orderedFields); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction (" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id (" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 8943423..2dfc66e 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; @@ -558,7 +559,7 @@ private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveCo private final IMetaStoreClient msClient; private final IMetaStoreClient heartbeaterMSClient; private final RecordWriter recordWriter; - private final List txnIds; + private final List txnToWriteIds; //volatile because heartbeat() may be in a "different" thread; updates of this are "piggybacking" private volatile int currentTxnIndex = -1; @@ -609,14 +610,19 @@ private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEn this.recordWriter = recordWriter; this.agentInfo = agentInfo; - txnIds = openTxnImpl(msClient, user, numTxns, ugi); + List txnIds = 
openTxnImpl(msClient, user, numTxns, ugi); + txnToWriteIds = allocateWriteIdsImpl(msClient, txnIds, ugi); + assert(txnToWriteIds.size() == numTxns); + txnStatus = new TxnState[numTxns]; for(int i = 0; i < txnStatus.length; i++) { + assert(txnToWriteIds.get(i).getTxnId() == txnIds.get(i)); txnStatus[i] = TxnState.OPEN;//Open matches Metastore state } - this.state = TxnState.INACTIVE; - recordWriter.newBatch(txnIds.get(0), txnIds.get(txnIds.size()-1)); + + // The Write Ids returned for the transaction batch is also sequential + recordWriter.newBatch(txnToWriteIds.get(0).getWriteId(), txnToWriteIds.get(numTxns-1).getWriteId()); success = true; } catch (TException e) { throw new TransactionBatchUnAvailable(endPt, e); @@ -642,9 +648,22 @@ public Object run() throws Exception { }) ; } + private List allocateWriteIdsImpl(final IMetaStoreClient msClient, final List txnIds, UserGroupInformation ugi) + throws IOException, TException, InterruptedException { + if(ugi==null) { + return msClient.allocateTableWriteIdsBatch(txnIds, endPt.database, endPt.table); + } + return (List) ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + return msClient.allocateTableWriteIdsBatch(txnIds, endPt.database, endPt.table); + } + }) ; + } + @Override public String toString() { - if (txnIds==null || txnIds.isEmpty()) { + if (txnToWriteIds==null || txnToWriteIds.isEmpty()) { return "{}"; } StringBuilder sb = new StringBuilder(" TxnStatus["); @@ -653,7 +672,8 @@ public String toString() { sb.append(state == null ? "N" : state); } sb.append("] LastUsed ").append(JavaUtils.txnIdToString(lastTxnUsed)); - return "TxnIds=[" + txnIds.get(0) + "..." + txnIds.get(txnIds.size()-1) + return "TxnIds=[" + txnToWriteIds.get(0).getTxnId() + "..." + + txnToWriteIds.get(txnToWriteIds.size()-1).getTxnId() + "] on endPoint = " + endPt + "; " + sb; } @@ -687,7 +707,7 @@ public Void run() throws TransactionError { private void beginNextTransactionImpl() throws TransactionError { state = TxnState.INACTIVE;//clear state from previous txn - if ( currentTxnIndex + 1 >= txnIds.size() ) + if ( currentTxnIndex + 1 >= txnToWriteIds.size() ) throw new InvalidTrasactionState("No more transactions available in" + " current batch for end point : " + endPt); ++currentTxnIndex; @@ -711,7 +731,19 @@ private void beginNextTransactionImpl() throws TransactionError { @Override public Long getCurrentTxnId() { if(currentTxnIndex >= 0) { - return txnIds.get(currentTxnIndex); + return txnToWriteIds.get(currentTxnIndex).getTxnId(); + } + return -1L; + } + + /** + * Get Id of currently open transaction + * @return -1 if there is no open TX + */ + @Override + public Long getCurrentWriteId() { + if(currentTxnIndex >= 0) { + return txnToWriteIds.get(currentTxnIndex).getWriteId(); } return -1L; } @@ -733,9 +765,9 @@ public TxnState getCurrentTransactionState() { @Override public int remainingTransactions() { if (currentTxnIndex>=0) { - return txnIds.size() - currentTxnIndex -1; + return txnToWriteIds.size() - currentTxnIndex -1; } - return txnIds.size(); + return txnToWriteIds.size(); } @@ -830,7 +862,7 @@ public Void run() throws StreamingException { private void writeImpl(Collection records) throws StreamingException { for (byte[] record : records) { - recordWriter.write(getCurrentTxnId(), record); + recordWriter.write(getCurrentWriteId(), record); } } @@ -875,7 +907,7 @@ public Void run() throws StreamingException { private void commitImpl() throws TransactionError, StreamingException { try { recordWriter.flush(); - 
msClient.commitTxn(txnIds.get(currentTxnIndex)); + msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); state = TxnState.COMMITTED; txnStatus[currentTxnIndex] = TxnState.COMMITTED; } catch (NoSuchTxnException e) { @@ -938,8 +970,8 @@ private void abortImpl(boolean abortAllRemaining) throws TransactionError, Strea int minOpenTxnIndex = Math.max(currentTxnIndex + (state == TxnState.ABORTED || state == TxnState.COMMITTED ? 1 : 0), 0); for(currentTxnIndex = minOpenTxnIndex; - currentTxnIndex < txnIds.size(); currentTxnIndex++) { - msClient.rollbackTxn(txnIds.get(currentTxnIndex)); + currentTxnIndex < txnToWriteIds.size(); currentTxnIndex++) { + msClient.rollbackTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); txnStatus[currentTxnIndex] = TxnState.ABORTED; } currentTxnIndex--;//since the loop left it == txnId.size() @@ -966,15 +998,15 @@ public void heartbeat() throws StreamingException, HeartBeatFailure { if(isClosed) { return; } - if(state != TxnState.OPEN && currentTxnIndex >= txnIds.size() - 1) { + if(state != TxnState.OPEN && currentTxnIndex >= txnToWriteIds.size() - 1) { //here means last txn in the batch is resolved but the close() hasn't been called yet so //there is nothing to heartbeat return; } //if here after commit()/abort() but before next beginNextTransaction(), currentTxnIndex still //points at the last txn which we don't want to heartbeat - Long first = txnIds.get(state == TxnState.OPEN ? currentTxnIndex : currentTxnIndex + 1); - Long last = txnIds.get(txnIds.size()-1); + Long first = txnToWriteIds.get(state == TxnState.OPEN ? currentTxnIndex : currentTxnIndex + 1).getTxnId(); + Long last = txnToWriteIds.get(txnToWriteIds.size()-1).getTxnId(); try { HeartbeatTxnRangeResponse resp = heartbeaterMSClient.heartbeatTxnRange(first, last); if (!resp.getAborted().isEmpty() || !resp.getNosuch().isEmpty()) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java index cddb8de..53352fe 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java @@ -23,10 +23,10 @@ /** Writes using a hive RecordUpdater * - * @param transactionId the ID of the Txn in which the write occurs + * @param writeId the write ID of the table mapping to Txn in which the write occurs * @param record the record to be written */ - public void write(long transactionId, byte[] record) throws StreamingException; + public void write(long writeId, byte[] record) throws StreamingException; /** Flush records from buffer. Invoked by TransactionBatch.commit() */ public void flush() throws StreamingException; @@ -36,7 +36,7 @@ /** Acquire a new RecordUpdater. Invoked when * StreamingConnection.fetchTransactionBatch() is called */ - public void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException; + public void newBatch(Long minWriteId, Long maxWriteID) throws StreamingException; /** Close the RecordUpdater. 
Invoked by TransactionBatch.close() */ public void closeBatch() throws StreamingException; diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java index 357c537..4d92c55 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java @@ -117,15 +117,15 @@ protected HCatRecordObjectInspector getRecordObjectInspector() { @Override - public void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws StreamingIOFailure, SerializationError { try { Object encodedRow = encode(record); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction(" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id(" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java index 58db252..ae25662 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java @@ -124,15 +124,15 @@ protected StructObjectInspector getRecordObjectInspector() { @Override - public void write(long transactionId, byte[] record) + public void write(long writeId, byte[] record) throws StreamingIOFailure, SerializationError { try { Object encodedRow = encode(record); int bucket = getBucket(encodedRow); - getRecordUpdater(bucket).insert(transactionId, encodedRow); + getRecordUpdater(bucket).insert(writeId, encodedRow); } catch (IOException e) { - throw new StreamingIOFailure("Error writing record in transaction(" - + transactionId + ")", e); + throw new StreamingIOFailure("Error writing record in transaction write id(" + + writeId + ")", e); } } diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java index e5ad475..c8ece3f 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java @@ -58,6 +58,13 @@ public String toString() { */ public Long getCurrentTxnId(); + + /** + * Get write Id mapping to currently open transaction + * @return write id + */ + public Long getCurrentWriteId(); + /** * get state of current transaction */ diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java index 5b371e3..9fc9788 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTable.java @@ -34,7 +34,7 @@ private final String tableName; private final boolean createPartitions; private final TableType tableType; - private long transactionId; + private long writeId; private Table table; @@ -48,10 +48,10 @@ /** * Returns {@code 0} 
until such a time that a {@link Transaction} has been acquired (when * {@link MutatorClient#newTransaction()} exits), at which point this will return the - * {@link Transaction#getTransactionId() transaction id}. + * transaction id. */ - public long getTransactionId() { - return transactionId; + public long getWriteId() { + return writeId; } public String getDatabaseName() { @@ -105,8 +105,8 @@ public Table getTable() { return table; } - void setTransactionId(long transactionId) { - this.transactionId = transactionId; + void setWriteId(long writeId) { + this.writeId = writeId; } void setTable(Table table) { @@ -123,7 +123,7 @@ void setTable(Table table) { public String toString() { return "AcidTable [databaseName=" + databaseName + ", tableName=" + tableName + ", createPartitions=" + createPartitions + ", tableType=" + tableType + ", outputFormatName=" + getOutputFormatName() - + ", totalBuckets=" + getTotalBuckets() + ", transactionId=" + transactionId + "]"; + + ", totalBuckets=" + getTotalBuckets() + ", writeId=" + writeId + "]"; } } \ No newline at end of file diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java index 32db5e3..bc1f6fa 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/AcidTableSerializer.java @@ -54,10 +54,10 @@ public static String encode(AcidTable table) throws IOException { data.writeUTF(table.getDatabaseName()); data.writeUTF(table.getTableName()); data.writeBoolean(table.createPartitions()); - if (table.getTransactionId() <= 0) { - LOG.warn("Transaction ID <= 0. The recipient is probably expecting a transaction ID."); + if (table.getWriteId() <= 0) { + LOG.warn("Write ID <= 0. 
The recipient is probably expecting a table write ID."); } - data.writeLong(table.getTransactionId()); + data.writeLong(table.getWriteId()); data.writeByte(table.getTableType().getId()); Table metaTable = table.getTable(); @@ -96,7 +96,7 @@ public static AcidTable decode(String encoded) throws IOException { int thriftLength = in.readInt(); table = new AcidTable(databaseName, tableName, createPartitions, tableType); - table.setTransactionId(transactionId); + table.setWriteId(transactionId); Table metaTable = null; if (thriftLength > 0) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java index 645274e..6fca66b 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hive.hcatalog.streaming.mutate.client.lock.Lock; import org.apache.hive.hcatalog.streaming.mutate.client.lock.LockFailureListener; import org.apache.thrift.TException; @@ -94,8 +95,19 @@ public Transaction newTransaction() throws TransactionException { throw new TransactionException("Not connected - cannot create transaction."); } Transaction transaction = new Transaction(metaStoreClient, lockOptions); + long txnId = transaction.getTransactionId(); for (AcidTable table : tables) { - table.setTransactionId(transaction.getTransactionId()); + try { + table.setWriteId(metaStoreClient.allocateTableWriteId(txnId, + table.getDatabaseName(), table.getTableName())); + } catch (TException ex) { + try { + metaStoreClient.rollbackTxn(txnId); + } catch (TException e) { + LOG.warn("Operation failed and rollback transaction {} failed due to {}", txnId, e.getMessage()); + } + throw new TransactionException("Unable to allocate table write ID", ex); + } } LOG.debug("Created transaction {}", transaction); return transaction; diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java index ae23153..5e804d7 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java @@ -98,11 +98,11 @@ } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. - * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. 
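The MutatorClient.newTransaction() change above captures the core of this patch: one metastore transaction can span several ACID tables, and each table now gets its own write ID allocated against that transaction, while commit and rollback still act on the transaction ID. A hedged end-to-end sketch of that flow, using only the metastore calls that appear in this patch (openTxn, allocateTableWriteId, commitTxn, rollbackTxn); the writeRows helper is hypothetical.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    // Hedged sketch of the txnid/writeid split: per-table write ids inside one transaction.
    public final class WriteIdFlowSketch {
      static void writeTwoTables(IMetaStoreClient msc, String user) throws Exception {
        long txnId = msc.openTxn(user);
        try {
          long writeId1 = msc.allocateTableWriteId(txnId, "db_1", "table_1");
          long writeId2 = msc.allocateTableWriteId(txnId, "db_1", "table_2");
          writeRows(writeId1);   // rows/delta dirs for table_1 are stamped with writeId1
          writeRows(writeId2);   // rows/delta dirs for table_2 are stamped with writeId2
          msc.commitTxn(txnId);  // commit/rollback still operate on the transaction id
        } catch (Exception e) {
          msc.rollbackTxn(txnId);
          throw e;
        }
      }

      // Hypothetical placeholder; in this patch the actual write path is RecordUpdater.insert(writeId, row).
      private static void writeRows(long writeId) {
      }
    }
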
@@ -120,11 +120,11 @@ public void insert(List partitionValues, Object record) throws WorkerExc } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. - * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. @@ -142,11 +142,11 @@ public void update(List partitionValues, Object record) throws WorkerExc } /** - * We expect records grouped by (partitionValues,bucketId) and ordered by (origTxnId,rowId). + * We expect records grouped by (partitionValues,bucketId) and ordered by (origWriteId,rowId). * * @throws BucketIdException The bucket ID in the {@link RecordIdentifier} of the record does not match that computed * using the values in the record's bucketed columns. - * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origTxnId, rowId) + * @throws RecordSequenceException The record was submitted that was not in the correct ascending (origWriteId, rowId) * sequence. * @throws GroupRevisitedException If an event was submitted for a (partition, bucketId) combination that has already * been closed. @@ -229,9 +229,9 @@ private void resetMutator(int newBucketId, List newPartitionValues, Path sequenceValidator.reset(); if (deleteDeltaIfExists) { // TODO: Should this be the concern of the mutator? - deleteDeltaIfExists(newPartitionPath, table.getTransactionId(), newBucketId); + deleteDeltaIfExists(newPartitionPath, table.getWriteId(), newBucketId); } - mutator = mutatorFactory.newMutator(outputFormat, table.getTransactionId(), newPartitionPath, newBucketId); + mutator = mutatorFactory.newMutator(outputFormat, table.getWriteId(), newPartitionPath, newBucketId); bucketId = newBucketId; partitionValues = newPartitionValues; partitionPath = newPartitionPath; @@ -282,12 +282,12 @@ private void validateRecordSequence(OperationType operationType, RecordIdentifie } /* A delta may be present from a previous failed task attempt. 
*/ - private void deleteDeltaIfExists(Path partitionPath, long transactionId, int bucketId) throws IOException { + private void deleteDeltaIfExists(Path partitionPath, long writeId, int bucketId) throws IOException { Path deltaPath = AcidUtils.createFilename(partitionPath, new AcidOutputFormat.Options(configuration) .bucket(bucketId) - .minimumTransactionId(transactionId) - .maximumTransactionId(transactionId)); + .minimumWriteId(writeId) + .maximumWriteId(writeId)); FileSystem fileSystem = deltaPath.getFileSystem(configuration); if (fileSystem.exists(deltaPath)) { LOG.info("Deleting existing delta path: {}", deltaPath); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java index 22cd9b7..ce99ab6 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorFactory.java @@ -24,7 +24,7 @@ public interface MutatorFactory { - Mutator newMutator(AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketId) throws IOException; + Mutator newMutator(AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketId) throws IOException; RecordInspector newRecordInspector(); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java index 05cf8b7..84c477f 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java @@ -31,7 +31,7 @@ /** Base {@link Mutator} implementation. Creates a suitable {@link RecordUpdater} and delegates mutation events. 
*/ public class MutatorImpl implements Mutator { - private final long transactionId; + private final long writeId; private final Path partitionPath; private final int bucketProperty; private final Configuration configuration; @@ -44,11 +44,11 @@ * @throws IOException */ public MutatorImpl(Configuration configuration, int recordIdColumn, ObjectInspector objectInspector, - AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketProperty) throws IOException { + AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketProperty) throws IOException { this.configuration = configuration; this.recordIdColumn = recordIdColumn; this.objectInspector = objectInspector; - this.transactionId = transactionId; + this.writeId = writeId; this.partitionPath = partitionPath; this.bucketProperty = bucketProperty; @@ -57,17 +57,17 @@ public MutatorImpl(Configuration configuration, int recordIdColumn, ObjectInspec @Override public void insert(Object record) throws IOException { - updater.insert(transactionId, record); + updater.insert(writeId, record); } @Override public void update(Object record) throws IOException { - updater.update(transactionId, record); + updater.update(writeId, record); } @Override public void delete(Object record) throws IOException { - updater.delete(transactionId, record); + updater.delete(writeId, record); } /** @@ -89,7 +89,7 @@ public void close() throws IOException { @Override public String toString() { - return "ObjectInspectorMutator [transactionId=" + transactionId + ", partitionPath=" + partitionPath + return "ObjectInspectorMutator [writeId=" + writeId + ", partitionPath=" + partitionPath + ", bucketId=" + bucketProperty + "]"; } @@ -101,8 +101,8 @@ protected RecordUpdater createRecordUpdater(AcidOutputFormat outputFormat) new AcidOutputFormat.Options(configuration) .inspector(objectInspector) .bucket(bucketId) - .minimumTransactionId(transactionId) - .maximumTransactionId(transactionId) + .minimumWriteId(writeId) + .maximumWriteId(writeId) .recordIdColumn(recordIdColumn) .finalDestination(partitionPath) .statementId(-1)); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java index 5cd8081..97b5acc 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/SequenceValidator.java @@ -29,22 +29,22 @@ private static final Logger LOG = LoggerFactory.getLogger(SequenceValidator.class); - private Long lastTxId; + private Long lastWriteId; private Long lastRowId; SequenceValidator() { } boolean isInSequence(RecordIdentifier recordIdentifier) { - if (lastTxId != null && recordIdentifier.getTransactionId() < lastTxId) { - LOG.debug("Non-sequential transaction ID. Expected >{}, recordIdentifier={}", lastTxId, recordIdentifier); + if (lastWriteId != null && recordIdentifier.getWriteId() < lastWriteId) { + LOG.debug("Non-sequential transaction ID. Expected >{}, recordIdentifier={}", lastWriteId, recordIdentifier); return false; - } else if (lastTxId != null && recordIdentifier.getTransactionId() == lastTxId && lastRowId != null + } else if (lastWriteId != null && recordIdentifier.getWriteId() == lastWriteId && lastRowId != null && recordIdentifier.getRowId() <= lastRowId) { LOG.debug("Non-sequential row ID. 
Expected >{}, recordIdentifier={}", lastRowId, recordIdentifier); return false; } - lastTxId = recordIdentifier.getTransactionId(); + lastWriteId = recordIdentifier.getWriteId(); lastRowId = recordIdentifier.getRowId(); return true; } @@ -53,14 +53,14 @@ boolean isInSequence(RecordIdentifier recordIdentifier) { * Validator must be reset for each new partition and or bucket. */ void reset() { - lastTxId = null; + lastWriteId = null; lastRowId = null; LOG.debug("reset"); } @Override public String toString() { - return "SequenceValidator [lastTxId=" + lastTxId + ", lastRowId=" + lastRowId + "]"; + return "SequenceValidator [lastWriteId=" + lastWriteId + ", lastRowId=" + lastRowId + "]"; } } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index 5e12614..c5b96cd 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -45,7 +45,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.Validator; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; @@ -375,16 +375,16 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912)); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000016_0000016_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000018_0000019/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + 
Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'"); queryTable(driver, "delete from default.streamingnobuckets where a='a1'"); @@ -399,14 +399,14 @@ public void testNoBuckets() throws Exception { runWorker(conf); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000022/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000022/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005/bucket_00000")); } /** @@ -540,8 +540,8 @@ public void testTableValidation() throws Exception { */ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int buckets, int numExpectedFiles, String... 
records) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); - AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); + ValidWriteIdList writeIds = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); + AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); List current = dir.getCurrentDirectories(); @@ -553,8 +553,8 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int long min = Long.MAX_VALUE; long max = Long.MIN_VALUE; for (AcidUtils.ParsedDelta pd : current) { - if (pd.getMaxTransaction() > max) max = pd.getMaxTransaction(); - if (pd.getMinTransaction() < min) min = pd.getMinTransaction(); + if (pd.getMaxWriteId() > max) max = pd.getMaxWriteId(); + if (pd.getMinWriteId() < min) min = pd.getMinWriteId(); } Assert.assertEquals(minTxn, min); Assert.assertEquals(maxTxn, max); @@ -567,7 +567,7 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); InputSplit[] splits = inf.getSplits(job, buckets); Assert.assertEquals(numExpectedFiles, splits.length); org.apache.hadoop.mapred.RecordReader rr = @@ -587,7 +587,7 @@ private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int */ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int numExpectedFiles, String validationQuery, boolean vectorize, String... 
records) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); + ValidWriteIdList txns = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); @@ -600,8 +600,8 @@ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int long min = Long.MAX_VALUE; long max = Long.MIN_VALUE; for (AcidUtils.ParsedDelta pd : current) { - if (pd.getMaxTransaction() > max) max = pd.getMaxTransaction(); - if (pd.getMinTransaction() < min) min = pd.getMinTransaction(); + if (pd.getMaxWriteId() > max) max = pd.getMaxWriteId(); + if (pd.getMinWriteId() < min) min = pd.getMinWriteId(); } Assert.assertEquals(minTxn, min); Assert.assertEquals(maxTxn, max); @@ -625,8 +625,8 @@ private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int } private void checkNothingWritten(Path partitionPath) throws Exception { - ValidTxnList txns = msClient.getValidTxns(); - AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, txns); + ValidWriteIdList writeIds = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName)); + AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds); Assert.assertEquals(0, dir.getObsolete().size()); Assert.assertEquals(0, dir.getOriginalFiles().size()); List current = dir.getCurrentDirectories(); @@ -865,7 +865,7 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -877,11 +877,11 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -933,7 +933,7 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -945,11 +945,11 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -995,7 +995,7 @@ public void testTransactionBatchCommit_Json() throws Exception { txnBatch.write(rec1.getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 15, 24, 1, 1, "{1, Hello streaming}"); + 
checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -1122,7 +1122,7 @@ public void testTransactionBatchAbortAndCommit() throws Exception { txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 14, 23, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -1141,13 +1141,13 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg"; - checkDataWritten2(partLoc, 15, 24, 1, validationQuery, false, "1\tHello streaming"); + checkDataWritten2(partLoc, 1, 10, 1, validationQuery, false, "1\tHello streaming"); txnBatch.beginNextTransaction(); txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 24, 1, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 10, 1, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming"); txnBatch.close(); @@ -1158,14 +1158,14 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("3,Hello streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 40, 2, validationQuery, false, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch.beginNextTransaction(); txnBatch.write("4,Welcome to streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 15, 40, 2, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", "4\tWelcome to streaming - once again"); @@ -1202,14 +1202,14 @@ public void testInterleavedTransactionBatchCommits() throws Exception { txnBatch2.commit(); String validationQuery = "select id, msg from " + dbName + "." 
+ tblName + " order by id, msg"; - checkDataWritten2(partLoc, 24, 33, 1, + checkDataWritten2(partLoc, 11, 20, 1, validationQuery, true, "3\tHello streaming - once again"); txnBatch1.commit(); /*now both batches have committed (but not closed) so we for each primary file we expect a side file to exist and indicate the true length of primary file*/ FileSystem fs = partLoc.getFileSystem(conf); - AcidUtils.Directory dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidTxns()); + AcidUtils.Directory dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName))); for(AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) { for(FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) { Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath()); @@ -1222,7 +1222,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength == actualLength); } } - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.beginNextTransaction(); @@ -1234,7 +1234,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { //so each of 2 deltas has 1 bucket0 and 1 bucket0_flush_length. Furthermore, each bucket0 //has now received more data(logically - it's buffered) but it is not yet committed. //lets check that side files exist, etc - dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidTxns()); + dir = AcidUtils.getAcidState(partLoc, conf, msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName))); for(AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) { for(FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) { Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath()); @@ -1247,19 +1247,19 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength <= actualLength); } } - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.commit(); - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch2.commit(); - checkDataWritten2(partLoc, 14, 33, 2, + checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", @@ -2268,8 +2268,8 @@ private FaultyWriter(RecordWriter delegate) { this.delegate = delegate; } @Override - public void write(long transactionId, byte[] record) throws StreamingException { - delegate.write(transactionId, record); + public void write(long writeId, byte[] record) throws StreamingException { + delegate.write(writeId, record); produceFault(); } @Override diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java index e057da7..c05ddcf 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/ReflectiveMutatorFactory.java @@ -49,9 +49,9 @@ public ReflectiveMutatorFactory(Configuration configuration, Class recordClas } @Override - public 
Mutator newMutator(AcidOutputFormat outputFormat, long transactionId, Path partitionPath, int bucketId) + public Mutator newMutator(AcidOutputFormat outputFormat, long writeId, Path partitionPath, int bucketId) throws IOException { - return new MutatorImpl(configuration, recordIdColumn, objectInspector, outputFormat, transactionId, partitionPath, + return new MutatorImpl(configuration, recordIdColumn, objectInspector, outputFormat, writeId, partitionPath, bucketId); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java index c98d22b..e72b9fa 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingAssert.java @@ -25,7 +25,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -71,7 +71,7 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t private List partition; private IMetaStoreClient metaStoreClient; private Directory dir; - private ValidTxnList txns; + private ValidWriteIdList writeIds; private List currentDeltas; private long min; private long max; @@ -83,9 +83,9 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t this.table = table; this.partition = partition; - txns = metaStoreClient.getValidTxns(); + writeIds = metaStoreClient.getValidWriteIds(AcidUtils.getFullTableName(table.getDbName(), table.getTableName())); partitionLocation = getPartitionLocation(); - dir = AcidUtils.getAcidState(partitionLocation, conf, txns); + dir = AcidUtils.getAcidState(partitionLocation, conf, writeIds); assertEquals(0, dir.getObsolete().size()); assertEquals(0, dir.getOriginalFiles().size()); @@ -95,8 +95,8 @@ public StreamingAssert newStreamingAssert(Table table, List partition) t System.out.println("Files found: "); for (AcidUtils.ParsedDelta parsedDelta : currentDeltas) { System.out.println(parsedDelta.getPath().toString()); - max = Math.max(parsedDelta.getMaxTransaction(), max); - min = Math.min(parsedDelta.getMinTransaction(), min); + max = Math.max(parsedDelta.getMaxWriteId(), max); + min = Math.min(parsedDelta.getMinWriteId(), min); } } @@ -145,7 +145,7 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) { job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string"); AcidUtils.setAcidTableScan(job,true); job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); InputSplit[] splits = inputFormat.getSplits(job, 1); assertEquals(numSplitsExpected, splits.length); @@ -160,7 +160,7 @@ public void assertMaxTransactionId(long expectedMaxTransactionId) { while (recordReader.next(key, value)) { RecordIdentifier recordIdentifier = recordReader.getRecordIdentifier(); - Record record = new Record(new RecordIdentifier(recordIdentifier.getTransactionId(), + Record record = new Record(new RecordIdentifier(recordIdentifier.getWriteId(), recordIdentifier.getBucketProperty(), recordIdentifier.getRowId()), value.toString()); System.out.println(record); 
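The StreamingAssert/TestStreaming changes above show the reader side of the switch: instead of the global ValidTxnList, readers fetch a per-table ValidWriteIdList from the metastore and feed it to AcidUtils.getAcidState and the job configuration. A compact sketch of that pattern, mirroring the test code in this patch; not a complete read pipeline.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.ValidWriteIdList;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.ql.io.AcidUtils;
    import org.apache.hadoop.mapred.JobConf;

    // Hedged reader-side sketch: resolve ACID state with the table-level write id list.
    public final class WriteIdReadSketch {
      static AcidUtils.Directory listAcidState(IMetaStoreClient msc, HiveConf conf, JobConf job,
          String db, String table, Path partitionPath) throws Exception {
        ValidWriteIdList writeIds = msc.getValidWriteIds(AcidUtils.getFullTableName(db, table));
        job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); // consulted by the ACID input format
        return AcidUtils.getAcidState(partitionPath, conf, writeIds);      // deltas filtered by write id
      }
    }
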
records.add(record); diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java index 7876e8d..1523a10 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestAcidTableSerializer.java @@ -45,7 +45,7 @@ public void testSerializeDeserialize() throws Exception { AcidTable acidTable = new AcidTable("db_1", "table_1", true, TableType.SINK); acidTable.setTable(table); - acidTable.setTransactionId(42L); + acidTable.setWriteId(42L); String encoded = AcidTableSerializer.encode(acidTable); System.out.println(encoded); @@ -57,7 +57,7 @@ public void testSerializeDeserialize() throws Exception { assertThat(decoded.getOutputFormatName(), is("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")); assertThat(decoded.getTotalBuckets(), is(10)); assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1")); - assertThat(decoded.getTransactionId(), is(42L)); + assertThat(decoded.getWriteId(), is(42L)); assertThat(decoded.getTableType(), is(TableType.SINK)); assertThat(decoded.getTable(), is(table)); } @@ -75,7 +75,7 @@ public void testSerializeDeserializeNoTableNoTransaction() throws Exception { assertThat(decoded.getOutputFormatName(), is(nullValue())); assertThat(decoded.getTotalBuckets(), is(0)); assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1")); - assertThat(decoded.getTransactionId(), is(0L)); + assertThat(decoded.getWriteId(), is(0L)); assertThat(decoded.getTableType(), is(TableType.SINK)); assertThat(decoded.getTable(), is(nullValue())); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java index cfe3a96..91b90ed 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java @@ -48,6 +48,8 @@ public class TestMutatorClient { private static final long TRANSACTION_ID = 42L; + private static final long WRITE_ID1 = 78L; + private static final long WRITE_ID2 = 33L; private static final String TABLE_NAME_1 = "TABLE_1"; private static final String TABLE_NAME_2 = "TABLE_2"; private static final String DB_NAME = "DB_1"; @@ -89,6 +91,8 @@ public void configureMocks() throws Exception { when(mockParameters.get("transactional")).thenReturn(Boolean.TRUE.toString()); when(mockMetaStoreClient.openTxn(USER)).thenReturn(TRANSACTION_ID); + when(mockMetaStoreClient.allocateTableWriteId(TRANSACTION_ID, DB_NAME, TABLE_NAME_1)).thenReturn(WRITE_ID1); + when(mockMetaStoreClient.allocateTableWriteId(TRANSACTION_ID, DB_NAME, TABLE_NAME_2)).thenReturn(WRITE_ID2); client = new MutatorClient(mockMetaStoreClient, mockConfiguration, mockLockFailureListener, USER, Collections.singletonList(TABLE_1)); @@ -110,13 +114,13 @@ public void testCheckValidTableConnect() throws Exception { assertThat(outTables.get(0).getTableName(), is(TABLE_NAME_1)); assertThat(outTables.get(0).getTotalBuckets(), is(2)); assertThat(outTables.get(0).getOutputFormatName(), is(OrcOutputFormat.class.getName())); - assertThat(outTables.get(0).getTransactionId(), is(0L)); + assertThat(outTables.get(0).getWriteId(), is(0L)); 
assertThat(outTables.get(0).getTable(), is(mockTable1)); assertThat(outTables.get(1).getDatabaseName(), is(DB_NAME)); assertThat(outTables.get(1).getTableName(), is(TABLE_NAME_2)); assertThat(outTables.get(1).getTotalBuckets(), is(2)); assertThat(outTables.get(1).getOutputFormatName(), is(OrcOutputFormat.class.getName())); - assertThat(outTables.get(1).getTransactionId(), is(0L)); + assertThat(outTables.get(1).getWriteId(), is(0L)); assertThat(outTables.get(1).getTable(), is(mockTable2)); } @@ -179,8 +183,8 @@ public void testNewTransaction() throws Exception { assertThat(transaction.getTransactionId(), is(TRANSACTION_ID)); assertThat(transaction.getState(), is(TxnState.INACTIVE)); - assertThat(outTables.get(0).getTransactionId(), is(TRANSACTION_ID)); - assertThat(outTables.get(1).getTransactionId(), is(TRANSACTION_ID)); + assertThat(outTables.get(0).getWriteId(), is(WRITE_ID1)); + assertThat(outTables.get(1).getWriteId(), is(WRITE_ID2)); } @Test diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java index d897477..fab56b3 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java @@ -49,7 +49,7 @@ private static final List UNPARTITIONED = Collections. emptyList(); private static final List PARTITION_B = Arrays.asList("B"); private static final List PARTITION_A = Arrays.asList("A"); - private static final long TRANSACTION_ID = 2L; + private static final long WRITE_ID = 2L; private static final int BUCKET_ID = 0; private static final Path PATH_A = new Path("X"); private static final Path PATH_B = new Path("B"); @@ -84,7 +84,7 @@ public void createCoordinator() throws Exception { when(mockAcidTable.getOutputFormatName()).thenReturn(OrcOutputFormat.class.getName()); when(mockAcidTable.getTotalBuckets()).thenReturn(1); - when(mockAcidTable.getTransactionId()).thenReturn(TRANSACTION_ID); + when(mockAcidTable.getWriteId()).thenReturn(WRITE_ID); when(mockAcidTable.createPartitions()).thenReturn(true); when(mockMutatorFactory.newRecordInspector()).thenReturn(mockRecordInspector); when(mockMutatorFactory.newBucketIdResolver(anyInt())).thenReturn(mockBucketIdResolver); @@ -104,7 +104,7 @@ public void insert() throws Exception { coordinator.insert(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).insert(RECORD); } @@ -115,7 +115,7 @@ public void multipleInserts() throws Exception { coordinator.insert(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator, times(3)).insert(RECORD); } @@ -129,8 +129,8 @@ public void insertPartitionChanges() throws Exception { verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_A); verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_B); - 
verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID)); verify(mockMutator, times(2)).insert(RECORD); } @@ -143,9 +143,9 @@ public void bucketChanges() throws Exception { coordinator.update(UNPARTITIONED, RECORD); coordinator.delete(UNPARTITIONED, RECORD); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutatorFactory) - .newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID + 1)); + .newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID + 1)); verify(mockMutator).update(RECORD); verify(mockMutator).delete(RECORD); } @@ -166,11 +166,11 @@ public void partitionThenBucketChanges() throws Exception { coordinator.update(PARTITION_B, RECORD); /* PbB1 */ verify(mockPartitionHelper).createPartitionIfNotExists(PARTITION_B); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); - verify(mockMutatorFactory, times(2)).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory, times(2)).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID)); verify(mockMutatorFactory) - .newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_B), eq(BUCKET_ID + 1)); + .newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_B), eq(BUCKET_ID + 1)); verify(mockMutator, times(2)).update(RECORD); verify(mockMutator).delete(RECORD); verify(mockMutator).insert(RECORD); @@ -197,7 +197,7 @@ public void outOfSequence() throws Exception { coordinator.delete(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).update(RECORD); verify(mockMutator).delete(RECORD); } @@ -210,7 +210,7 @@ public void revisitGroup() throws Exception { coordinator.delete(UNPARTITIONED, RECORD); verify(mockPartitionHelper).createPartitionIfNotExists(UNPARTITIONED); - verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(TRANSACTION_ID), eq(PATH_A), eq(BUCKET_ID)); + verify(mockMutatorFactory).newMutator(any(OrcOutputFormat.class), eq(WRITE_ID), eq(PATH_A), eq(BUCKET_ID)); verify(mockMutator).update(RECORD); verify(mockMutator).delete(RECORD); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java index 2273e06..d2c89e5 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java +++ 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java @@ -49,7 +49,7 @@ private static final int RECORD_ID_COLUMN = 2; private static final int BUCKET_ID = 0; private static final Path PATH = new Path("X"); - private static final long TRANSACTION_ID = 1L; + private static final long WRITE_ID = 1L; @Mock private AcidOutputFormat mockOutputFormat; @@ -67,7 +67,7 @@ @Before public void injectMocks() throws IOException { when(mockOutputFormat.getRecordUpdater(eq(PATH), any(Options.class))).thenReturn(mockRecordUpdater); - mutator = new MutatorImpl(configuration, RECORD_ID_COLUMN, mockObjectInspector, mockOutputFormat, TRANSACTION_ID, + mutator = new MutatorImpl(configuration, RECORD_ID_COLUMN, mockObjectInspector, mockOutputFormat, WRITE_ID, PATH, BUCKET_ID); } @@ -79,26 +79,26 @@ public void testCreatesRecordReader() throws IOException { assertThat(options.getConfiguration(), is((Configuration) configuration)); assertThat(options.getInspector(), is(mockObjectInspector)); assertThat(options.getRecordIdColumn(), is(RECORD_ID_COLUMN)); - assertThat(options.getMinimumTransactionId(), is(TRANSACTION_ID)); - assertThat(options.getMaximumTransactionId(), is(TRANSACTION_ID)); + assertThat(options.getMinimumWriteId(), is(WRITE_ID)); + assertThat(options.getMaximumWriteId(), is(WRITE_ID)); } @Test public void testInsertDelegates() throws IOException { mutator.insert(RECORD); - verify(mockRecordUpdater).insert(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).insert(WRITE_ID, RECORD); } @Test public void testUpdateDelegates() throws IOException { mutator.update(RECORD); - verify(mockRecordUpdater).update(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).update(WRITE_ID, RECORD); } @Test public void testDeleteDelegates() throws IOException { mutator.delete(RECORD); - verify(mockRecordUpdater).delete(TRANSACTION_ID, RECORD); + verify(mockRecordUpdater).delete(WRITE_ID, RECORD); } @Test diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index 7967a24..c0715c9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -244,7 +244,7 @@ public void testNonStandardConversion01() throws Exception { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/000000_0"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000021_0000021_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000001_0000001_0000/bucket_00000"} }; Assert.assertEquals("Unexpected row count after update", expected2.length, rs.size()); //verify data and layout @@ -256,7 +256,7 @@ public void testNonStandardConversion01() throws Exception { FileSystem fs = FileSystem.get(hiveConf); FileStatus[] status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDNONBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta = {"delete_delta_0000021_0000021_0000", "delete_delta_0000022_0000022_0000"}; + String[] expectedDelDelta = 
{"delete_delta_0000001_0000001_0000", "delete_delta_0000002_0000002_0000"}; for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta.length; i++) { if(expectedDelDelta[i] != null && stat.getPath().toString().endsWith(expectedDelDelta[i])) { @@ -285,7 +285,7 @@ public void testNonStandardConversion01() throws Exception { //check we have right delete delta files after minor compaction status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDNONBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta2 = {"delete_delta_0000021_0000021_0000", "delete_delta_0000022_0000022_0000", "delete_delta_0000021_0000022"}; + String[] expectedDelDelta2 = {"delete_delta_0000001_0000001_0000", "delete_delta_0000002_0000002_0000", "delete_delta_0000001_0000002"}; for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta2.length; i++) { if(expectedDelDelta2[i] != null && stat.getPath().toString().endsWith(expectedDelDelta2[i])) { @@ -309,7 +309,7 @@ public void testNonStandardConversion01() throws Exception { for(int i = 0; i < expected2.length; i++) { Assert.assertTrue("Actual line " + i + " bc: " + rs.get(i), rs.get(i).startsWith(expected2[i][0])); //everything is now in base/ - Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000022/bucket_00000")); + Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000002/bucket_00000")); } } /** @@ -453,12 +453,12 @@ public void testCtasTezUnion() throws Exception { /* * Expected result 0th entry is the RecordIdentifier + data. 1st entry file before compact*/ String expected[][] = { - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":2}\t5\t6", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":2}\t5\t6", "/delta_0000018_0000018_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":2}\t5\t6", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":2}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"}, }; Assert.assertEquals("Unexpected row count after ctas", expected.length, rs.size()); //verify data and layout @@ -475,10 +475,10 @@ public void testCtasTezUnion() throws Exception { LOG.warn(s); } String[][] expected2 = { - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000018_0000018_0001/bucket_00000"}, - {"{\"transactionid\":18,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000018_0000018_0001/bucket_00000"}, - 
{"{\"transactionid\":18,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000018_0000018_0002/bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000020_0000020_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t70\t80", "delta_0000003_0000003_0000/bucket_00000"} }; Assert.assertEquals("Unexpected row count after update", expected2.length, rs.size()); //verify data and layout @@ -519,7 +519,7 @@ public void testCtasTezUnion() throws Exception { //check we have right delete delta files after minor compaction status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.ACIDNOBUCKET).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); - String[] expectedDelDelta2 = { "delete_delta_0000020_0000020_0000", "delete_delta_0000021_0000021_0000", "delete_delta_0000018_0000021"}; + String[] expectedDelDelta2 = { "delete_delta_0000003_0000003_0000", "delete_delta_0000004_0000004_0000", "delete_delta_0000001_0000004"}; for(FileStatus stat : status) { for(int i = 0; i < expectedDelDelta2.length; i++) { if(expectedDelDelta2[i] != null && stat.getPath().toString().endsWith(expectedDelDelta2[i])) { @@ -543,7 +543,7 @@ public void testCtasTezUnion() throws Exception { for(int i = 0; i < expected2.length; i++) { Assert.assertTrue("Actual line " + i + " bc: " + rs.get(i), rs.get(i).startsWith(expected2[i][0])); //everything is now in base/ - Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000021/bucket_00000")); + Assert.assertTrue("Actual line(file) " + i + " bc: " + rs.get(i), rs.get(i).endsWith("base_0000004/bucket_00000")); } } /** @@ -638,17 +638,17 @@ public void testAcidInsertWithRemoveUnion() throws Exception { ├── HIVE_UNION_SUBDIR_1 │   └── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000019_0000019_0001 + │   └── delta_0000001_0000001_0001 │   └── bucket_00000 ├── HIVE_UNION_SUBDIR_2 │   └── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000019_0000019_0002 + │   └── delta_0000001_0000001_0002 │   └── bucket_00000 └── HIVE_UNION_SUBDIR_3 └── 000000_0 ├── _orc_acid_version - └── delta_0000019_0000019_0003 + └── delta_0000001_0000001_0003 └── bucket_00000 10 directories, 6 files */ @@ -660,11 +660,11 @@ public void testAcidInsertWithRemoveUnion() throws Exception { } String[][] expected2 = { - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000019_0000019_0001/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000019_0000019_0001/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000019_0000019_0002/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000019_0000019_0002/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000019_0000019_0003/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", 
"warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000001_0000001_0003/bucket_00000"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { @@ -688,11 +688,11 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception { └── -ext-10000 ├── 000000_0 │   ├── _orc_acid_version - │   └── delta_0000021_0000021_0000 + │   └── delta_0000001_0000001_0000 │   └── bucket_00000 └── 000001_0 ├── _orc_acid_version - └── delta_0000021_0000021_0000 + └── delta_0000001_0000001_0000 └── bucket_00001 5 directories, 4 files @@ -705,11 +705,11 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception { LOG.warn(s); } String[][] expected2 = { - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":2}\t5\t6", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536936448,\"rowid\":1}\t9\t10", "warehouse/t/delta_0000021_0000021_0000/bucket_00001"} + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":2}\t5\t6", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t9\t10", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 1305902..424f100 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -662,17 +662,17 @@ public void minorCompactWhileStreaming() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if 
(names[i].equals("delta_0000001_0000004")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"}; + String[] expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); @@ -722,11 +722,11 @@ public void majorCompactWhileStreaming() throws Exception { FileStatus[] stat = fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter); if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals(name, "base_0000006"); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + Assert.assertEquals(name, "base_0000004"); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -782,17 +782,17 @@ public void minorCompactAfterAbort() throws Exception { Path resultDelta = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if (names[i].equals("delta_0000001_0000004")) { resultDelta = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -848,13 +848,13 @@ public void majorCompactAfterAbort() throws Exception { Assert.fail("majorCompactAfterAbort FileStatus[] stat " + Arrays.toString(stat)); } if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - if (!name.equals("base_0000006")) { - Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000006"); + if (!name.equals("base_0000004")) { + Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004"); } - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } finally { connection.close(); } @@ -903,11 +903,11 @@ public 
void majorCompactWhileStreamingForSplitUpdate() throws Exception { FileStatus[] stat = fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter); if (1 != stat.length) { - Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals(name, "base_0000006"); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 2); + Assert.assertEquals(name, "base_0000004"); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 2); } finally { connection.close(); } @@ -961,16 +961,16 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000003_0000005")) { + if (deltas[i].equals("delta_0000001_0000003")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000005", "delta_0000004_0000004_0000"}; + String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003", "delta_0000002_0000002_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } - checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L, 1); // Verify that we have got correct set of delete_deltas. 
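
The renumbering in the expected directory names above follows from write ids being allocated per table (they restart from 1) instead of reusing the global transaction id. A minimal standalone sketch of the naming pattern these assertions rely on; the zero-padded %07d widths are inferred from strings like "delta_0000001_0000004", not taken from AcidUtils:

// Illustration only: a minor compaction over write ids 1..4 collapses into a single
// delta/delete_delta covering that range; a major compaction produces one base directory.
public class CompactedDirNames {
  static String delta(long minWriteId, long maxWriteId) {
    return String.format("delta_%07d_%07d", minWriteId, maxWriteId);
  }
  static String deleteDelta(long minWriteId, long maxWriteId) {
    return String.format("delete_delta_%07d_%07d", minWriteId, maxWriteId);
  }
  static String base(long maxWriteId) {
    return String.format("base_%07d", maxWriteId);
  }
  public static void main(String[] args) {
    System.out.println(delta(1, 4));        // delta_0000001_0000004
    System.out.println(deleteDelta(1, 4));  // delete_delta_0000001_0000004
    System.out.println(base(4));            // base_0000004
  }
}
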
FileStatus[] deleteDeltaStat = @@ -979,16 +979,16 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000005")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000003")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000005", "delete_delta_0000005_0000005_0000"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003", "delete_delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } - checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 4L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 2L, 1); } @Test @@ -1038,16 +1038,16 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000003_0000004")) { + if (deltas[i].equals("delta_0000001_0000002")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000004", "delta_0000004_0000004_0000"}; + String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002", "delta_0000002_0000002_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } - checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L, 1); // Verify that we have got correct set of delete_deltas. 
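
checkExpectedTxnsPresent (updated further down to take a ValidWriteIdList) reads the compacted data back and asserts that the write ids carried in each ROW__ID cover the expected range without gaps. A small self-contained sketch of that check, assuming the ids arrive in non-decreasing order as they do from the sorted ACID reader:

// Sketch of the range assertion: ids must start at min, only ever step by one, and stay within max.
public class WriteIdRangeCheck {
  static void assertContiguous(long[] writeIds, long min, long max) {
    long current = min;
    boolean seenFirst = false;
    for (long id : writeIds) {
      if (!seenFirst) {
        if (id != min) throw new AssertionError("expected first write id " + min + " but saw " + id);
        seenFirst = true;
      } else if (id != current) {
        if (id != current + 1) throw new AssertionError("gap between " + current + " and " + id);
        current++;
      }
      if (current > max) throw new AssertionError("write id " + current + " beyond max " + max);
    }
  }
  public static void main(String[] args) {
    assertContiguous(new long[]{1, 1, 2}, 1L, 2L); // passes: matches the 1L..2L expectation above
  }
}
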
FileStatus[] deleteDeltaStat = @@ -1056,12 +1056,12 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000004")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000002")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000004"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000002"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } @@ -1115,17 +1115,17 @@ public void minorCompactWhileStreamingWithSplitUpdate() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000003_0000006")) { + if (names[i].equals("delta_0000001_0000004")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000003_0000004", - "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"}; + String[] expected = new String[]{"delta_0000001_0000002", + "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 3L, 6L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); // Verify that we have got correct set of delete_deltas also FileStatus[] deleteDeltaStat = @@ -1134,12 +1134,12 @@ public void minorCompactWhileStreamingWithSplitUpdate() throws Exception { Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000003_0000006")) { + if (deleteDeltas[i].equals("delete_delta_0000001_0000004")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000006"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000004"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } @@ -1331,14 +1331,19 @@ private void writeBatch(StreamingConnection connection, DelimitedInputWriter wri private void checkExpectedTxnsPresent(Path base, Path[] deltas, String columnNamesProperty, String columnTypesProperty, int bucket, long min, long max, int numBuckets) throws IOException { - ValidTxnList txnList = new ValidTxnList() { + ValidWriteIdList writeIdList = new ValidWriteIdList() { @Override - public boolean isTxnValid(long txnid) { + public String getTableName() { + return "AcidTable"; + } + + @Override + public boolean isWriteIdValid(long writeid) { return true; } @Override - public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) { + public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } @@ -1353,7 +1358,7 @@ public void readFromString(String 
src) { } @Override - public Long getMinOpenTxn() { return null; } + public Long getMinOpenWriteId() { return null; } @Override public long getHighWatermark() { @@ -1361,7 +1366,7 @@ public long getHighWatermark() { } @Override - public long[] getInvalidTransactions() { + public long[] getInvalidWriteIds() { return new long[0]; } @Override @@ -1370,12 +1375,12 @@ public boolean isValidBase(long txnid) { } @Override - public boolean isTxnAborted(long txnid) { + public boolean isWriteIdAborted(long txnid) { return true; } @Override - public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) { + public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } }; @@ -1388,18 +1393,18 @@ public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) { conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets)); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_TABLE_SCAN, true); AcidInputFormat.RawReader reader = - aif.getRawReader(conf, true, bucket, txnList, base, deltas); + aif.getRawReader(conf, true, bucket, writeIdList, base, deltas); RecordIdentifier identifier = reader.createKey(); OrcStruct value = reader.createValue(); long currentTxn = min; boolean seenCurrentTxn = false; while (reader.next(identifier, value)) { if (!seenCurrentTxn) { - Assert.assertEquals(currentTxn, identifier.getTransactionId()); + Assert.assertEquals(currentTxn, identifier.getWriteId()); seenCurrentTxn = true; } - if (currentTxn != identifier.getTransactionId()) { - Assert.assertEquals(currentTxn + 1, identifier.getTransactionId()); + if (currentTxn != identifier.getWriteId()) { + Assert.assertEquals(currentTxn + 1, identifier.getWriteId()); currentTxn++; } } diff --git a/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql b/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql new file mode 100644 index 0000000..de090a3 --- /dev/null +++ b/metastore/scripts/upgrade/derby/050-HIVE-18192.derby.sql @@ -0,0 +1,14 @@ +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +RENAME COLUMN COMPACTION_QUEUE.CQ_HIGHEST_TXN_ID TO CQ_HIGHEST_WRITE_ID; + +RENAME COLUMN COMPLETED_COMPACTIONS.CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID; diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql index d72b06c..fe89bfa 100644 --- a/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-txn-schema-3.0.0.derby.sql @@ -55,6 +55,17 @@ CREATE TABLE NEXT_TXN_ID ( ); INSERT INTO NEXT_TXN_ID VALUES(1); +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + CREATE TABLE HIVE_LOCKS ( HL_LOCK_EXT_ID bigint NOT NULL, HL_LOCK_INT_ID bigint NOT NULL, @@ -93,7 +104,7 @@ CREATE TABLE COMPACTION_QUEUE ( CQ_WORKER_ID varchar(128), CQ_START bigint, CQ_RUN_AS varchar(128), - CQ_HIGHEST_TXN_ID bigint, + CQ_HIGHEST_WRITE_ID bigint, CQ_META_INFO varchar(2048) for bit data, CQ_HADOOP_JOB_ID varchar(32) ); @@ -115,7 +126,7 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_START bigint, CC_END bigint, CC_RUN_AS varchar(128), - CC_HIGHEST_TXN_ID bigint, + CC_HIGHEST_WRITE_ID bigint, CC_META_INFO varchar(2048) for bit 
data, CC_HADOOP_JOB_ID varchar(32) ); diff --git a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql index 3a11881..55b89e7 100644 --- a/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql +++ b/metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql @@ -7,5 +7,6 @@ RUN '045-HIVE-16886.derby.sql'; RUN '046-HIVE-17566.derby.sql'; RUN '048-HIVE-14498.derby.sql'; RUN '049-HIVE-18489.derby.sql'; +RUN '050-HIVE-18192.derby.sql'; UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 74595b0..3fb6c1a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -42,8 +42,8 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.metrics.common.Metrics; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.exec.DagUtils; import org.apache.hadoop.hive.ql.exec.ExplainTask; import org.apache.hadoop.hive.ql.exec.FetchTask; +import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -74,6 +75,7 @@ import org.apache.hadoop.hive.ql.hooks.HooksLoader; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; @@ -1213,27 +1215,67 @@ public FetchTask getFetchTask() { // Write the current set of valid transactions into the conf file so that it can be read by // the input format. private void recordValidTxns(HiveTxnManager txnMgr) throws LockException { - ValidTxnList oldList = null; - String s = conf.get(ValidTxnList.VALID_TXNS_KEY); + ValidTxnWriteIdList oldList = null; + String s = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(s != null && s.length() > 0) { - oldList = new ValidReadTxnList(s); + oldList = new ValidTxnWriteIdList(s); } - ValidTxnList txns = txnMgr.getValidTxns(); + // TODO (Sankar): Need to set ValidTxnList for some callers which need snapshot + // Get the current set of valid write IDs for the current transaction and write it into the + // conf file so that it can be read by the input format. 
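
The two new metastore tables above are what allocateTableWriteId draws from: NEXT_WRITE_ID holds the next id to hand out per table (NWI_TABLE, NWI_NEXT), and TXN_TO_WRITE_ID records which write id a given transaction received for that table (T2W_TXNID, T2W_TABLE, T2W_WRITEID). A simplified in-memory model of that allocation, for illustration only; the real allocation happens in the metastore and is not shown in this diff:

import java.util.HashMap;
import java.util.Map;

// Hypothetical model: per-table counters start at 1, and a txn that asks twice for the
// same table gets the same write id back (mirroring the TXN_TO_WRITE_ID mapping).
public class WriteIdAllocator {
  private final Map<String, Long> nextWriteId = new HashMap<>();   // "db.table" -> NWI_NEXT
  private final Map<String, Long> txnToWriteId = new HashMap<>();  // "txnId:db.table" -> T2W_WRITEID

  public synchronized long allocateTableWriteId(long txnId, String dbName, String tableName) {
    String table = dbName.toLowerCase() + "." + tableName.toLowerCase();
    String key = txnId + ":" + table;
    Long existing = txnToWriteId.get(key);
    if (existing != null) {
      return existing;
    }
    long writeId = nextWriteId.getOrDefault(table, 1L);
    nextWriteId.put(table, writeId + 1);
    txnToWriteId.put(key, writeId);
    return writeId;
  }

  public static void main(String[] args) {
    WriteIdAllocator allocator = new WriteIdAllocator();
    System.out.println(allocator.allocateTableWriteId(42L, "db_1", "table_1")); // 1
    System.out.println(allocator.allocateTableWriteId(42L, "db_1", "table_2")); // 1: counters are per table
    System.out.println(allocator.allocateTableWriteId(43L, "db_1", "table_1")); // 2
    System.out.println(allocator.allocateTableWriteId(42L, "db_1", "table_1")); // 1 again: same txn, same table
  }
}
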
+ ValidTxnWriteIdList txnWriteIds = txnMgr.getValidWriteIds(getTransactionalTableList(plan)); if(oldList != null) { throw new IllegalStateException("calling recordValidTxn() more than once in the same " + JavaUtils.txnIdToString(txnMgr.getCurrentTxnId())); } - String txnStr = txns.toString(); - conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr); - if(plan.getFetchTask() != null) { + String writeIdStr = txnWriteIds.toString(); + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, writeIdStr); + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIdStr); + if (plan.getFetchTask() != null) { /** * This is needed for {@link HiveConf.ConfVars.HIVEFETCHTASKCONVERSION} optimization which * initializes JobConf in FetchOperator before recordValidTxns() but this has to be done * after locks are acquired to avoid race conditions in ACID. + * This case is supported only for single-source queries. */ - plan.getFetchTask().setValidTxnList(txnStr); + Operator source = plan.getFetchTask().getWork().getSource(); + if (source instanceof TableScanOperator) { + TableScanOperator tsOp = (TableScanOperator)source; + ValidWriteIdList writeIdList = txnWriteIds.getTableWriteIdList( + AcidUtils.getFullTableName(tsOp.getConf().getDatabaseName(), tsOp.getConf().getTableName())); + plan.getFetchTask().setValidWriteIdList(writeIdList.toString()); + } + } + LOG.debug("Encoding valid write ids info " + writeIdStr + " txnid:" + txnMgr.getCurrentTxnId()); + } + + // Build the list of transactional tables that are read or written by the current txn + private List<String> getTransactionalTableList(QueryPlan plan) { + List<String> tableList = new ArrayList<>(); + + for (ReadEntity input : plan.getInputs()) { + addTableFromEntity(input, tableList); + } + return tableList; + } + + private void addTableFromEntity(Entity entity, List<String> tableList) { + Table tbl; + switch (entity.getType()) { + case TABLE: + tbl = entity.getTable(); + break; + case PARTITION: + case DUMMYPARTITION: + tbl = entity.getPartition().getTable(); + break; + default: + return; + } + String fullTableName = AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName()); + if (AcidUtils.isTransactionalTable(tbl) && !tableList.contains(fullTableName)) { + tableList.add(fullTableName); } - LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId()); } private String getUserFromUGI() { @@ -1276,7 +1318,7 @@ private void acquireLocks() throws CommandProcessorResponse { if(userFromUGI == null) { throw createProcessorResponse(10); } - // Set the transaction id in all of the acid file sinks + // Set the table write id in all of the acid file sinks if (haveAcidWrite()) { List<FileSinkDesc> acidSinks = new ArrayList<>(plan.getAcidSinks()); //sorting makes tests easier to write since file names and ROW__IDs depend on statementId acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getDirName().compareTo(fsd2.getDirName())); for (FileSinkDesc desc : acidSinks) { - desc.setTransactionId(queryTxnMgr.getCurrentTxnId()); + TableDesc tableInfo = desc.getTableInfo(); + long writeId = queryTxnMgr.getTableWriteId(Utilities.getDatabaseName(tableInfo.getTableName()), + Utilities.getTableName(tableInfo.getTableName())); + desc.setTableWriteId(writeId); + + //it's possible to have > 1 FileSink writing to the same table/partition + //e.g.
Merge stmt, multi-insert stmt when mixing DP and SP writes - desc.setStatementId(queryTxnMgr.getWriteIdAndIncrement()); + desc.setStatementId(queryTxnMgr.getStmtIdAndIncrement()); } } /*It's imperative that {@code acquireLocks()} is called for all commands so that @@ -1336,7 +1382,7 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa } // If we've opened a transaction we need to commit or rollback rather than explicitly // releasing the locks. - conf.unset(ValidTxnList.VALID_TXNS_KEY); + conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(!checkConcurrency()) { return; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index cf19351..df84417 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -260,7 +260,7 @@ public void closeOp(boolean abort) throws HiveException { // There's always just one file that we have merged. // The union/DP/etc. should already be account for in the path. Utilities.writeMmCommitManifest(Lists.newArrayList(outPath), - tmpPath.getParent(), fs, taskId, conf.getTxnId(), conf.getStmtId(), null, false); + tmpPath.getParent(), fs, taskId, conf.getWriteId(), conf.getStmtId(), null, false); LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes)."); } } @@ -322,7 +322,7 @@ public void jobCloseOp(Configuration hconf, boolean success) try { Path outputDir = conf.getOutputPath(); FileSystem fs = outputDir.getFileSystem(hconf); - Long mmWriteId = conf.getTxnId(); + Long mmWriteId = conf.getWriteId(); int stmtId = conf.getStmtId(); if (!isMmTable) { Path backupPath = backupOutputPath(fs, outputDir); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 51ef390..63762d5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; @@ -4417,7 +4418,7 @@ private void checkMmLb(Partition part) throws HiveException { } private void handleRemoveMm( - Path path, ValidTxnList validTxnList, List result) throws HiveException { + Path path, ValidWriteIdList validWriteIdList, List result) throws HiveException { // Note: doesn't take LB into account; that is not presently supported here (throws above). try { FileSystem fs = path.getFileSystem(conf); @@ -4427,10 +4428,10 @@ private void handleRemoveMm( ensureDelete(fs, childPath, "a non-directory file"); continue; } - Long writeId = JavaUtils.extractTxnId(childPath); + Long writeId = JavaUtils.extractWriteId(childPath); if (writeId == null) { ensureDelete(fs, childPath, "an unknown directory"); - } else if (!validTxnList.isTxnValid(writeId)) { + } else if (!validWriteIdList.isWriteIdValid(writeId)) { // Assume no concurrent active writes - we rely on locks here. We could check and fail. 
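
Both handleRemoveMm above and the anonymous test stub earlier in this patch go through ValidWriteIdList.isWriteIdValid to decide whether data written under a given write id is visible to the reader. A conceptual model of such a snapshot, assuming the usual high-water-mark-plus-exceptions shape; the real ValidReaderWriteIdList also carries the table name, aborted-id tracking and a minimum open write id, as the interface methods in the test stub show:

import java.util.Arrays;

// Conceptual model only: a write id is readable if it is at or below the snapshot's
// high-water mark and is not one of the exception ids (open or aborted writes).
public class WriteIdSnapshot {
  private final long highWatermark;
  private final long[] exceptions; // open/aborted write ids below the high-water mark

  public WriteIdSnapshot(long highWatermark, long[] exceptions) {
    this.highWatermark = highWatermark;
    this.exceptions = exceptions.clone();
    Arrays.sort(this.exceptions);
  }

  public boolean isWriteIdValid(long writeId) {
    return writeId <= highWatermark && Arrays.binarySearch(exceptions, writeId) < 0;
  }

  public static void main(String[] args) {
    WriteIdSnapshot snapshot = new WriteIdSnapshot(4L, new long[]{3L}); // write id 3 still open
    System.out.println(snapshot.isWriteIdValid(2L)); // true
    System.out.println(snapshot.isWriteIdValid(3L)); // false: not yet committed
    System.out.println(snapshot.isWriteIdValid(5L)); // false: beyond the snapshot
  }
}
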
ensureDelete(fs, childPath, "an uncommitted directory"); } else { @@ -4465,9 +4466,10 @@ private static void ensureDelete(FileSystem fs, Path path, String what) throws I try { HiveTxnManager txnManager = SessionState.get().getTxnMgr(); if (txnManager.isTxnOpen()) { - mmWriteId = txnManager.getCurrentTxnId(); + mmWriteId = txnManager.getTableWriteId(tbl.getDbName(), tbl.getTableName()); } else { - mmWriteId = txnManager.openTxn(new Context(conf), conf.getUser()); + txnManager.openTxn(new Context(conf), conf.getUser()); + mmWriteId = txnManager.getTableWriteId(tbl.getDbName(), tbl.getTableName()); txnManager.commitTxn(); } } catch (Exception e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 97e1e36..01654e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -37,8 +37,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -151,8 +151,8 @@ public FetchOperator(FetchWork work, JobConf job, Operator operator, initialize(); } - public void setValidTxnList(String txnStr) { - job.set(ValidTxnList.VALID_TXNS_KEY, txnStr); + public void setValidWriteIdList(String writeIdStr) { + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIdStr); } private void initialize() throws HiveException { if (isStatReader) { @@ -407,17 +407,17 @@ private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOEx if (inputFormat instanceof HiveInputFormat) { return StringUtils.escapeString(currPath.toString()); // No need to process here. } - ValidTxnList validTxnList; + ValidWriteIdList validWriteIdList; if (AcidUtils.isInsertOnlyTable(currDesc.getTableDesc().getProperties())) { - validTxnList = extractValidTxnList(); + validWriteIdList = extractValidTxnList(); } else { - validTxnList = null; // non-MM case + validWriteIdList = null; // non-MM case } - if (validTxnList != null) { + if (validWriteIdList != null) { Utilities.FILE_OP_LOGGER.info("Processing " + currDesc.getTableName() + " for MM paths"); } - Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, validTxnList); + Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, validWriteIdList); if (dirs == null || dirs.length == 0) { return null; // No valid inputs. This condition is logged inside the call. } @@ -428,10 +428,10 @@ private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOEx return str.toString(); } - private ValidTxnList extractValidTxnList() { + private ValidWriteIdList extractValidTxnList() { if (currDesc.getTableName() == null || !org.apache.commons.lang.StringUtils.isBlank(currDesc.getTableName())) { - String txnString = job.get(ValidTxnList.VALID_TXNS_KEY); - return txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = job.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + return txnString == null ? 
new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); } return null; // not fetching from a table directly but from a temp location } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java index a7dace9..519bee3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java @@ -57,8 +57,8 @@ public FetchTask() { super(); } - public void setValidTxnList(String txnStr) { - fetch.setValidTxnList(txnStr); + public void setValidWriteIdList(String writeIdStr) { + fetch.setValidWriteIdList(writeIdStr); } @Override public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext ctx, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 98bb938..da31cef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -173,7 +173,7 @@ int acidLastBucket = -1; int acidFileOffset = -1; private boolean isMmTable; - private Long txnId; + private Long writeId; private int stmtId; String dpDir; @@ -185,7 +185,7 @@ public FSPaths(Path specPath, boolean isMmTable) { } else { tmpPath = specPath; taskOutputTempPath = null; // Should not be used. - txnId = conf.getTransactionId(); + writeId = conf.getTableWriteId(); stmtId = conf.getStatementId(); } if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { @@ -337,7 +337,7 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT } outPaths[filesIdx] = getTaskOutPath(taskId); } else { - String subdirPath = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(), txnId, txnId, stmtId); + String subdirPath = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(), writeId, writeId, stmtId); if (unionPath != null) { // Create the union directory inside the MM directory. subdirPath += Path.SEPARATOR + unionPath; @@ -961,7 +961,7 @@ public void process(Object row, int tag) throws HiveException { if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable()) { rowOutWriters[findWriterOffset(row)].write(recordValue); } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) { - fpaths.updaters[findWriterOffset(row)].insert(conf.getTransactionId(), row); + fpaths.updaters[findWriterOffset(row)].insert(conf.getTableWriteId(), row); } else { // TODO I suspect we could skip much of the stuff above this in the function in the case // of update and delete. 
But I don't understand all of the side effects of the above @@ -1018,9 +1018,9 @@ public void process(Object row, int tag) throws HiveException { } } if (conf.getWriteType() == AcidUtils.Operation.UPDATE) { - fpaths.updaters[writerOffset].update(conf.getTransactionId(), row); + fpaths.updaters[writerOffset].update(conf.getTableWriteId(), row); } else if (conf.getWriteType() == AcidUtils.Operation.DELETE) { - fpaths.updaters[writerOffset].delete(conf.getTransactionId(), row); + fpaths.updaters[writerOffset].delete(conf.getTableWriteId(), row); } else { throw new HiveException("Unknown write type " + conf.getWriteType().toString()); } @@ -1322,7 +1322,7 @@ public void closeOp(boolean abort) throws HiveException { } if (conf.isMmTable()) { Utilities.writeMmCommitManifest( - commitPaths, specPath, fs, taskId, conf.getTransactionId(), conf.getStatementId(), unionPath, conf.getInsertOverwrite()); + commitPaths, specPath, fs, taskId, conf.getTableWriteId(), conf.getStatementId(), unionPath, conf.getInsertOverwrite()); } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { @@ -1380,7 +1380,7 @@ public void jobCloseOp(Configuration hconf, boolean success) MissingBucketsContext mbc = new MissingBucketsContext( conf.getTableInfo(), numBuckets, conf.getCompressed()); Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success, - dpLevels, lbLevels, mbc, conf.getTransactionId(), conf.getStatementId(), reporter, + dpLevels, lbLevels, mbc, conf.getTableWriteId(), conf.getStatementId(), reporter, conf.isMmTable(), conf.isMmCtas(), conf.getInsertOverwrite()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 7348faa..92ab562 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -52,7 +52,6 @@ import java.util.Arrays; import java.util.Calendar; import java.util.Collection; -import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; @@ -101,7 +100,7 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; @@ -4389,7 +4388,7 @@ private static void deleteUncommitedFile(Path childPath, FileSystem fs) * if the entire directory is valid (has no uncommitted/temporary files). */ public static List getValidMmDirectoriesFromTableOrPart(Path path, Configuration conf, - ValidTxnList validTxnList, int lbLevels) throws IOException { + ValidWriteIdList validWriteIdList, int lbLevels) throws IOException { Utilities.FILE_OP_LOGGER.trace("Looking for valid MM paths under {}", path); // NULL means this directory is entirely valid. 
List result = null; @@ -4399,8 +4398,8 @@ private static void deleteUncommitedFile(Path childPath, FileSystem fs) for (int i = 0; i < children.length; ++i) { FileStatus file = children[i]; Path childPath = file.getPath(); - Long txnId = JavaUtils.extractTxnId(childPath); - if (!file.isDirectory() || txnId == null || !validTxnList.isTxnValid(txnId)) { + Long writeId = JavaUtils.extractWriteId(childPath); + if (!file.isDirectory() || writeId == null || !validWriteIdList.isWriteIdValid(writeId)) { Utilities.FILE_OP_LOGGER.debug("Skipping path {}", childPath); if (result == null) { result = new ArrayList<>(children.length - 1); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index abd42ec..65fb2a0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.io.CachingPrintStream; import org.apache.hadoop.hive.common.log.LogRedirector; import org.apache.hadoop.hive.common.metrics.common.Metrics; @@ -487,6 +488,7 @@ private void initializeOperators(Map fetchOpJobConfMap) AcidUtils.setAcidTableScan(jobClone, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(jobClone, ts.getConf().getAcidOperationalProperties()); + AcidUtils.setValidWriteIdList(jobClone, ts.getConf()); // create a fetch operator FetchOperator fetchOp = new FetchOperator(entry.getValue(), jobClone); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java index 65eb434..1ed35b3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; @@ -109,8 +109,8 @@ extends InputFormat, InputFormatChecker { static final class DeltaMetaData implements Writable { - private long minTxnId; - private long maxTxnId; + private long minWriteId; + private long maxWriteId; private List stmtIds; //would be useful to have enum for Type: insert/delete/load data @@ -120,27 +120,27 @@ public DeltaMetaData() { /** * @param stmtIds delta dir suffixes when a single txn writes > 1 delta in the same partition */ - DeltaMetaData(long minTxnId, long maxTxnId, List stmtIds) { - this.minTxnId = minTxnId; - this.maxTxnId = maxTxnId; + DeltaMetaData(long minWriteId, long maxWriteId, List stmtIds) { + this.minWriteId = minWriteId; + this.maxWriteId = maxWriteId; if (stmtIds == null) { throw new IllegalArgumentException("stmtIds == null"); } this.stmtIds = stmtIds; } - long getMinTxnId() { - return minTxnId; + long getMinWriteId() { + return minWriteId; } - long getMaxTxnId() { - return maxTxnId; + long getMaxWriteId() { + return maxWriteId; } List getStmtIds() { return stmtIds; } @Override public void write(DataOutput out) throws IOException { - out.writeLong(minTxnId); - out.writeLong(maxTxnId); + 
out.writeLong(minWriteId); + out.writeLong(maxWriteId); out.writeInt(stmtIds.size()); for(Integer id : stmtIds) { out.writeInt(id); @@ -148,8 +148,8 @@ public void write(DataOutput out) throws IOException { } @Override public void readFields(DataInput in) throws IOException { - minTxnId = in.readLong(); - maxTxnId = in.readLong(); + minWriteId = in.readLong(); + maxWriteId = in.readLong(); stmtIds.clear(); int numStatements = in.readInt(); for(int i = 0; i < numStatements; i++) { @@ -159,7 +159,7 @@ public void readFields(DataInput in) throws IOException { @Override public String toString() { //? is Type - when implemented - return "Delta(?," + minTxnId + "," + maxTxnId + "," + stmtIds + ")"; + return "Delta(?," + minWriteId + "," + maxWriteId + "," + stmtIds + ")"; } } /** @@ -227,7 +227,7 @@ public Reporter getReporter() { * @param collapseEvents should the ACID events be collapsed so that only * the last version of the row is kept. * @param bucket the bucket to read - * @param validTxnList the list of valid transactions to use + * @param validWriteIdList the list of valid write ids to use * @param baseDirectory the base directory to read or the root directory for * old style files * @param deltaDirectory a list of delta files to include in the merge @@ -237,7 +237,7 @@ public Reporter getReporter() { RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory ) throws IOException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java index 26d4dc6..05beafe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java @@ -49,8 +49,8 @@ private boolean isCompressed = false; private Properties properties; private Reporter reporter; - private long minimumTransactionId; - private long maximumTransactionId; + private long minimumWriteId; + private long maximumWriteId; private int bucketId; /** * Based on {@link org.apache.hadoop.hive.ql.metadata.Hive#mvFile(HiveConf, FileSystem, Path, FileSystem, Path, boolean, boolean)} @@ -156,22 +156,22 @@ public Options reporter(Reporter reporter) { } /** - * The minimum transaction id that is included in this file. - * @param min minimum transaction id + * The minimum write id that is included in this file. + * @param min minimum write id * @return this */ - public Options minimumTransactionId(long min) { - this.minimumTransactionId = min; + public Options minimumWriteId(long min) { + this.minimumWriteId = min; return this; } /** - * The maximum transaction id that is included in this file. - * @param max maximum transaction id + * The maximum write id that is included in this file. 
+ * @param max maximum write id * @return this */ - public Options maximumTransactionId(long max) { - this.maximumTransactionId = max; + public Options maximumWriteId(long max) { + this.maximumWriteId = max; return this; } @@ -236,7 +236,7 @@ public Options useDummy(PrintStream stream) { */ public Options statementId(int id) { if(id >= AcidUtils.MAX_STATEMENTS_PER_TXN) { - throw new RuntimeException("Too many statements for transactionId: " + maximumTransactionId); + throw new RuntimeException("Too many statements for writeId: " + maximumWriteId); } if(id < -1) { throw new IllegalArgumentException("Illegal statementId value: " + id); @@ -277,12 +277,12 @@ public Reporter getReporter() { return reporter; } - public long getMinimumTransactionId() { - return minimumTransactionId; + public long getMinimumWriteId() { + return minimumWriteId; } - public long getMaximumTransactionId() { - return maximumTransactionId; + public long getMaximumWriteId() { + return maximumWriteId; } public boolean isWritingBase() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 430e0fc..64dfa4a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -36,7 +36,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.DataOperationType; @@ -49,6 +50,7 @@ import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; import org.apache.hadoop.hive.shims.ShimLoader; @@ -224,8 +226,8 @@ public static String deleteDeltaSubdir(long min, long max, int statementId) { return deleteDeltaSubdir(min, max) + "_" + String.format(STATEMENT_DIGITS, statementId); } - public static String baseDir(long txnId) { - return BASE_PREFIX + String.format(DELTA_DIGITS, txnId); + public static String baseDir(long writeId) { + return BASE_PREFIX + String.format(DELTA_DIGITS, writeId); } /** @@ -254,31 +256,31 @@ public static Path createFilename(Path directory, options.getBucketId()) + "_0"); } else if (options.isWritingBase()) { subdir = BASE_PREFIX + String.format(DELTA_DIGITS, - options.getMaximumTransactionId()); + options.getMaximumWriteId()); } else if(options.getStatementId() == -1) { //when minor compaction runs, we collapse per statement delta files inside a single //transaction so we no longer need a statementId in the file name subdir = options.isWritingDeleteDelta() ? - deleteDeltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId()) - : deltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId()); + deleteDeltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId()) + : deltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId()); } else { subdir = options.isWritingDeleteDelta() ? 
- deleteDeltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId(), + deleteDeltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId(), options.getStatementId()) - : deltaSubdir(options.getMinimumTransactionId(), - options.getMaximumTransactionId(), + : deltaSubdir(options.getMinimumWriteId(), + options.getMaximumWriteId(), options.getStatementId()); } return createBucketFile(new Path(directory, subdir), options.getBucketId()); } /** - * Get the transaction id from a base directory name. + * Get the write id from a base directory name. * @param path the base directory name - * @return the maximum transaction id that is included + * @return the maximum write id that is included */ public static long parseBase(Path path) { String filename = path.getName(); @@ -306,8 +308,8 @@ public static long parseBase(Path path) { Integer.parseInt(filename.substring(0, filename.indexOf('_'))); result .setOldStyle(true) - .minimumTransactionId(0) - .maximumTransactionId(0) + .minimumWriteId(0) + .maximumWriteId(0) .bucket(bucket) .writingBase(!bucketFile.getParent().getName().startsWith(DELTA_PREFIX)); } @@ -318,8 +320,8 @@ else if(ORIGINAL_PATTERN_COPY.matcher(filename).matches()) { int copyNumber = Integer.parseInt(filename.substring(filename.lastIndexOf('_') + 1)); result .setOldStyle(true) - .minimumTransactionId(0) - .maximumTransactionId(0) + .minimumWriteId(0) + .maximumWriteId(0) .bucket(bucket) .copyNumber(copyNumber) .writingBase(!bucketFile.getParent().getName().startsWith(DELTA_PREFIX)); @@ -330,8 +332,8 @@ else if (filename.startsWith(BUCKET_PREFIX)) { if (bucketFile.getParent().getName().startsWith(BASE_PREFIX)) { result .setOldStyle(false) - .minimumTransactionId(0) - .maximumTransactionId(parseBase(bucketFile.getParent())) + .minimumWriteId(0) + .maximumWriteId(parseBase(bucketFile.getParent())) .bucket(bucket) .writingBase(true); } else if (bucketFile.getParent().getName().startsWith(DELTA_PREFIX)) { @@ -339,21 +341,21 @@ else if (filename.startsWith(BUCKET_PREFIX)) { bucketFile.getFileSystem(conf)); result .setOldStyle(false) - .minimumTransactionId(parsedDelta.minTransaction) - .maximumTransactionId(parsedDelta.maxTransaction) + .minimumWriteId(parsedDelta.minWriteId) + .maximumWriteId(parsedDelta.maxWriteId) .bucket(bucket); } else if (bucketFile.getParent().getName().startsWith(DELETE_DELTA_PREFIX)) { ParsedDelta parsedDelta = parsedDelta(bucketFile.getParent(), DELETE_DELTA_PREFIX, bucketFile.getFileSystem(conf)); result .setOldStyle(false) - .minimumTransactionId(parsedDelta.minTransaction) - .maximumTransactionId(parsedDelta.maxTransaction) + .minimumWriteId(parsedDelta.minWriteId) + .maximumWriteId(parsedDelta.maxWriteId) .bucket(bucket); } } else { - result.setOldStyle(true).bucket(-1).minimumTransactionId(0) - .maximumTransactionId(0); + result.setOldStyle(true).bucket(-1).minimumWriteId(0) + .maximumWriteId(0); } return result; } @@ -637,8 +639,8 @@ public String toString() { * Immutable */ public static final class ParsedDelta implements Comparable { - private final long minTransaction; - private final long maxTransaction; + private final long minWriteId; + private final long maxWriteId; private final FileStatus path; //-1 is for internal (getAcidState()) purposes and means the delta dir //had no statement ID @@ -655,8 +657,8 @@ private ParsedDelta(long min, long max, FileStatus path, boolean isDeleteDelta, } private ParsedDelta(long min, long max, FileStatus path, int statementId, boolean isDeleteDelta, boolean isRawFormat) { - 
this.minTransaction = min; - this.maxTransaction = max; + this.minWriteId = min; + this.maxWriteId = max; this.path = path; this.statementId = statementId; this.isDeleteDelta = isDeleteDelta; @@ -664,12 +666,12 @@ private ParsedDelta(long min, long max, FileStatus path, int statementId, assert !isDeleteDelta || !isRawFormat : " deleteDelta should not be raw format"; } - public long getMinTransaction() { - return minTransaction; + public long getMinWriteId() { + return minWriteId; } - public long getMaxTransaction() { - return maxTransaction; + public long getMaxWriteId() { + return maxWriteId; } public Path getPath() { @@ -698,14 +700,14 @@ public boolean isRawFormat() { */ @Override public int compareTo(ParsedDelta parsedDelta) { - if (minTransaction != parsedDelta.minTransaction) { - if (minTransaction < parsedDelta.minTransaction) { + if (minWriteId != parsedDelta.minWriteId) { + if (minWriteId < parsedDelta.minWriteId) { return -1; } else { return 1; } - } else if (maxTransaction != parsedDelta.maxTransaction) { - if (maxTransaction < parsedDelta.maxTransaction) { + } else if (maxWriteId != parsedDelta.maxWriteId) { + if (maxWriteId < parsedDelta.maxWriteId) { return 1; } else { return -1; @@ -754,11 +756,11 @@ else if(statementId != parsedDelta.statementId) { List result = new ArrayList<>(deltas.size()); AcidInputFormat.DeltaMetaData last = null; for(ParsedDelta parsedDelta : deltas) { - if(last != null && last.getMinTxnId() == parsedDelta.getMinTransaction() && last.getMaxTxnId() == parsedDelta.getMaxTransaction()) { + if(last != null && last.getMinWriteId() == parsedDelta.getMinWriteId() && last.getMaxWriteId() == parsedDelta.getMaxWriteId()) { last.getStmtIds().add(parsedDelta.getStatementId()); continue; } - last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinTransaction(), parsedDelta.getMaxTransaction(), new ArrayList()); + last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinWriteId(), parsedDelta.getMaxWriteId(), new ArrayList()); result.add(last); if(parsedDelta.statementId >= 0) { last.getStmtIds().add(parsedDelta.getStatementId()); @@ -780,11 +782,11 @@ else if(statementId != parsedDelta.statementId) { List results = new ArrayList(deleteDeltas.size()); for(AcidInputFormat.DeltaMetaData dmd : deleteDeltas) { if(dmd.getStmtIds().isEmpty()) { - results.add(new Path(root, deleteDeltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId()))); + results.add(new Path(root, deleteDeltaSubdir(dmd.getMinWriteId(), dmd.getMaxWriteId()))); continue; } for(Integer stmtId : dmd.getStmtIds()) { - results.add(new Path(root, deleteDeltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId(), stmtId))); + results.add(new Path(root, deleteDeltaSubdir(dmd.getMinWriteId(), dmd.getMaxWriteId(), stmtId))); } } return results.toArray(new Path[results.size()]); @@ -802,8 +804,8 @@ private static ParsedDelta parseDelta(FileStatus path, String deltaPrefix, FileS throws IOException { ParsedDelta p = parsedDelta(path.getPath(), deltaPrefix, fs); boolean isDeleteDelta = deltaPrefix.equals(DELETE_DELTA_PREFIX); - return new ParsedDelta(p.getMinTransaction(), - p.getMaxTransaction(), path, p.statementId, isDeleteDelta, p.isRawFormat()); + return new ParsedDelta(p.getMinWriteId(), + p.getMaxWriteId(), path, p.statementId, isDeleteDelta, p.isRawFormat()); } public static ParsedDelta parsedDelta(Path deltaDir, String deltaPrefix, FileSystem fs) @@ -856,16 +858,16 @@ public static boolean isAcid(Path directory, @VisibleForTesting public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList 
txnList + ValidWriteIdList writeIdList ) throws IOException { - return getAcidState(directory, conf, txnList, false, false); + return getAcidState(directory, conf, writeIdList, false, false); } /** State class for getChildState; cannot modify 2 things in a method. */ private static class TxnBase { private FileStatus status; - private long txn = 0; - private long oldestBaseTxnId = Long.MAX_VALUE; + private long writeId = 0; + private long oldestBaseWriteId = Long.MAX_VALUE; private Path oldestBase = null; } @@ -876,22 +878,22 @@ public static Directory getAcidState(Path directory, * transaction id that we must exclude. * @param directory the partition directory to analyze * @param conf the configuration - * @param txnList the list of transactions that we are reading + * @param writeIdList the list of write ids that we are reading * @return the state of the directory * @throws IOException */ public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList txnList, + ValidWriteIdList writeIdList, boolean useFileIds, boolean ignoreEmptyFiles ) throws IOException { - return getAcidState(directory, conf, txnList, Ref.from(useFileIds), ignoreEmptyFiles, null); + return getAcidState(directory, conf, writeIdList, Ref.from(useFileIds), ignoreEmptyFiles, null); } public static Directory getAcidState(Path directory, Configuration conf, - ValidTxnList txnList, + ValidWriteIdList writeIdList, Ref useFileIds, boolean ignoreEmptyFiles, Map tblproperties) throws IOException { @@ -921,13 +923,13 @@ public static Directory getAcidState(Path directory, final List original = new ArrayList<>(); if (childrenWithId != null) { for (HdfsFileStatusWithId child : childrenWithId) { - getChildState(child.getFileStatus(), child, txnList, working, originalDirectories, original, + getChildState(child.getFileStatus(), child, writeIdList, working, originalDirectories, original, obsolete, bestBase, ignoreEmptyFiles, abortedDirectories, tblproperties, fs); } } else { List children = HdfsUtils.listLocatedStatus(fs, directory, hiddenFileFilter); for (FileStatus child : children) { - getChildState(child, null, txnList, working, originalDirectories, original, obsolete, + getChildState(child, null, writeIdList, working, originalDirectories, original, obsolete, bestBase, ignoreEmptyFiles, abortedDirectories, tblproperties, fs); } } @@ -955,30 +957,30 @@ public static Directory getAcidState(Path directory, Collections.sort(working); //so now, 'working' should be sorted like delta_5_20 delta_5_10 delta_11_20 delta_51_60 for example //and we want to end up with the best set containing all relevant data: delta_5_20 delta_51_60, - //subject to list of 'exceptions' in 'txnList' (not show in above example). - long current = bestBase.txn; + //subject to list of 'exceptions' in 'writeIdList' (not show in above example). + long current = bestBase.writeId; int lastStmtId = -1; ParsedDelta prev = null; for(ParsedDelta next: working) { - if (next.maxTransaction > current) { + if (next.maxWriteId > current) { // are any of the new transactions ones that we care about? 
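// Illustrative aside, not part of the patch: a minimal sketch of the probe that the
// selection loop below performs, assuming writeIdList has already been resolved for the
// table being scanned. The helper name and parameters here are hypothetical. A delta such
// as delta_51_60 stays in the read set only if some readable write id falls in (current, 60].
static boolean deltaIsRelevant(ValidWriteIdList writeIdList, long current, long deltaMaxWriteId) {
  // RangeResponse.NONE means no readable write id in the range, so the delta is skipped.
  return writeIdList.isWriteIdRangeValid(current + 1, deltaMaxWriteId)
      != ValidWriteIdList.RangeResponse.NONE;
}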
- if (txnList.isTxnRangeValid(current+1, next.maxTransaction) != - ValidTxnList.RangeResponse.NONE) { + if (writeIdList.isWriteIdRangeValid(current+1, next.maxWriteId) != + ValidWriteIdList.RangeResponse.NONE) { deltas.add(next); - current = next.maxTransaction; + current = next.maxWriteId; lastStmtId = next.statementId; prev = next; } } - else if(next.maxTransaction == current && lastStmtId >= 0) { + else if(next.maxWriteId == current && lastStmtId >= 0) { //make sure to get all deltas within a single transaction; multi-statement txn //generate multiple delta files with the same txnId range - //of course, if maxTransaction has already been minor compacted, all per statement deltas are obsolete + //of course, if maxWriteId has already been minor compacted, all per statement deltas are obsolete deltas.add(next); prev = next; } - else if (prev != null && next.maxTransaction == prev.maxTransaction - && next.minTransaction == prev.minTransaction + else if (prev != null && next.maxWriteId == prev.maxWriteId + && next.minWriteId == prev.minWriteId && next.statementId == prev.statementId) { // The 'next' parsedDelta may have everything equal to the 'prev' parsedDelta, except // the path. This may happen when we have split update and we have two types of delta @@ -1002,15 +1004,15 @@ else if (prev != null && next.maxTransaction == prev.maxTransaction if(bestBase.oldestBase != null && bestBase.status == null) { /** * If here, it means there was a base_x (> 1 perhaps) but none were suitable for given - * {@link txnList}. Note that 'original' files are logically a base_Long.MIN_VALUE and thus + * {@link writeIdList}. Note that 'original' files are logically a base_Long.MIN_VALUE and thus * cannot have any data for an open txn. We could check {@link deltas} has files to cover * [1,n] w/o gaps but this would almost never happen...*/ - long[] exceptions = txnList.getInvalidTransactions(); - String minOpenTxn = exceptions != null && exceptions.length > 0 ? + long[] exceptions = writeIdList.getInvalidWriteIds(); + String minOpenWriteId = exceptions != null && exceptions.length > 0 ? Long.toString(exceptions[0]) : "x"; throw new IOException(ErrorMsg.ACID_NOT_ENOUGH_HISTORY.format( - Long.toString(txnList.getHighWatermark()), - minOpenTxn, bestBase.oldestBase.toString())); + Long.toString(writeIdList.getHighWatermark()), + minOpenWriteId, bestBase.oldestBase.toString())); } final Path base = bestBase.status == null ? null : bestBase.status.getPath(); @@ -1071,43 +1073,44 @@ public boolean isBaseInRawFormat() { * causes anything written previously is ignored (hence the overwrite). In this case, base_x * is visible if txnid:x is committed for current reader. */ - private static boolean isValidBase(long baseTxnId, ValidTxnList txnList, Path baseDir, - FileSystem fs) throws IOException { - if(baseTxnId == Long.MIN_VALUE) { + private static boolean isValidBase(long baseWriteId, ValidWriteIdList writeIdList, Path baseDir, + FileSystem fs) throws IOException { + if(baseWriteId == Long.MIN_VALUE) { //such base is created by 1st compaction in case of non-acid to acid table conversion //By definition there are no open txns with id < 1. 
return true; } if(!MetaDataFile.isCompacted(baseDir, fs)) { //this is the IOW case - return txnList.isTxnValid(baseTxnId); + return writeIdList.isWriteIdValid(baseWriteId); } - return txnList.isValidBase(baseTxnId); + return writeIdList.isValidBase(baseWriteId); } + private static void getChildState(FileStatus child, HdfsFileStatusWithId childWithId, - ValidTxnList txnList, List working, List originalDirectories, + ValidWriteIdList writeIdList, List working, List originalDirectories, List original, List obsolete, TxnBase bestBase, boolean ignoreEmptyFiles, List aborted, Map tblproperties, FileSystem fs) throws IOException { Path p = child.getPath(); String fn = p.getName(); if (fn.startsWith(BASE_PREFIX) && child.isDir()) { - long txn = parseBase(p); - if(bestBase.oldestBaseTxnId > txn) { + long writeId = parseBase(p); + if(bestBase.oldestBaseWriteId > writeId) { //keep track for error reporting bestBase.oldestBase = p; - bestBase.oldestBaseTxnId = txn; + bestBase.oldestBaseWriteId = writeId; } if (bestBase.status == null) { - if(isValidBase(txn, txnList, p, fs)) { + if(isValidBase(writeId, writeIdList, p, fs)) { bestBase.status = child; - bestBase.txn = txn; + bestBase.writeId = writeId; } - } else if (bestBase.txn < txn) { - if(isValidBase(txn, txnList, p, fs)) { + } else if (bestBase.writeId < writeId) { + if(isValidBase(writeId, writeIdList, p, fs)) { obsolete.add(bestBase.status); bestBase.status = child; - bestBase.txn = txn; + bestBase.writeId = writeId; } } else { obsolete.add(child); @@ -1118,12 +1121,12 @@ private static void getChildState(FileStatus child, HdfsFileStatusWithId childWi (fn.startsWith(DELTA_PREFIX)) ? DELTA_PREFIX : DELETE_DELTA_PREFIX; ParsedDelta delta = parseDelta(child, deltaPrefix, fs); if (tblproperties != null && AcidUtils.isInsertOnlyTable(tblproperties) && - ValidTxnList.RangeResponse.ALL == txnList.isTxnRangeAborted(delta.minTransaction, delta.maxTransaction)) { + ValidWriteIdList.RangeResponse.ALL == writeIdList.isWriteIdRangeAborted(delta.minWriteId, delta.maxWriteId)) { aborted.add(child); } - if (txnList.isTxnRangeValid(delta.minTransaction, - delta.maxTransaction) != - ValidTxnList.RangeResponse.NONE) { + if (writeIdList.isWriteIdRangeValid(delta.minWriteId, + delta.maxWriteId) != + ValidWriteIdList.RangeResponse.NONE) { working.add(delta); } } else if (child.isDir()) { @@ -1393,7 +1396,7 @@ public static AcidOperationalProperties getAcidOperationalProperties( * Returns the logical end of file for an acid data file. * * This relies on the fact that if delta_x_y has no committed transactions it wil be filtered out - * by {@link #getAcidState(Path, Configuration, ValidTxnList)} and so won't be read at all. + * by {@link #getAcidState(Path, Configuration, ValidWriteIdList)} and so won't be read at all. 
* @param file - data file to read/compute splits on */ public static long getLogicalLength(FileSystem fs, FileStatus file) throws IOException { @@ -1492,6 +1495,39 @@ public static boolean isRemovedInsertOnlyTable(Set removedSet) { } /** + * Extract the ValidWriteIdList for the given table from the list of tables' ValidWriteIdList + */ + public static ValidWriteIdList getTableValidWriteIdList(Configuration conf, String fullTableName) { + String txnString = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + ValidTxnWriteIdList validTxnList = new ValidTxnWriteIdList(txnString); + return validTxnList.getTableWriteIdList(fullTableName); + } + + /** + * Set the valid write id list for the current table scan + */ + public static void setValidWriteIdList(Configuration conf, ValidWriteIdList validWriteIds) { + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, validWriteIds.toString()); + } + + /** + * Set the valid write id list for the current table scan + */ + public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) { + String dbName = tsDesc.getDatabaseName(); + String tableName = tsDesc.getTableName(); + if ((dbName != null) && (tableName != null)) { + ValidWriteIdList validWriteIdList = AcidUtils.getTableValidWriteIdList(conf, + AcidUtils.getFullTableName(dbName, tableName)); + setValidWriteIdList(conf, validWriteIdList); + } + } + + public static String getFullTableName(String dbName, String tableName) { + return dbName.toLowerCase() + "." + tableName.toLowerCase(); + } + + /** * General facility to place a metadta file into a dir created by acid/compactor write. * * Load Data commands against Acid tables write {@link AcidBaseFileType#ORIGINAL_BASE} type files diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java index f0d4988..71498a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java @@ -341,8 +341,8 @@ private static RecordUpdater getRecordUpdater(JobConf jc, .tableProperties(tableProp) .reporter(reporter) .writingBase(conf.getInsertOverwrite()) - .minimumTransactionId(conf.getTransactionId()) - .maximumTransactionId(conf.getTransactionId()) + .minimumWriteId(conf.getTableWriteId()) + .maximumWriteId(conf.getTableWriteId()) .bucket(bucket) .inspector(inspector) .recordIdColumn(rowIdColNum) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index c3b846c..56f5d2b 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hive.common.util.Ref; import org.slf4j.Logger; @@ -457,18 +457,19 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job InputFormat inputFormat, Class inputFormatClass, int splits, TableDesc table, List result) throws IOException { - ValidTxnList validTxnList; + ValidWriteIdList validWriteIdList = 
AcidUtils.getTableValidWriteIdList(conf, table.getTableName()); + ValidWriteIdList validMmWriteIdList; if (AcidUtils.isInsertOnlyTable(table.getProperties())) { - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - validTxnList = txnString == null ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + validMmWriteIdList = validWriteIdList; } else { - validTxnList = null; // for non-MM case + validMmWriteIdList = null; // for non-MM case } try { Utilities.copyTablePropertiesToConf(table, conf); if(tableScan != null) { AcidUtils.setAcidTableScan(conf, tableScan.getConf().isAcidTable()); + AcidUtils.setValidWriteIdList(conf, validWriteIdList); } } catch (HiveException e) { throw new IOException(e); @@ -478,7 +479,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job pushFilters(conf, tableScan, this.mrwork); } - Path[] finalDirs = processPathsForMmRead(dirs, conf, validTxnList); + Path[] finalDirs = processPathsForMmRead(dirs, conf, validMmWriteIdList); if (finalDirs == null) { return; // No valid inputs. } @@ -503,13 +504,13 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } public static Path[] processPathsForMmRead(List dirs, JobConf conf, - ValidTxnList validTxnList) throws IOException { - if (validTxnList == null) { + ValidWriteIdList validWriteIdList) throws IOException { + if (validWriteIdList == null) { return dirs.toArray(new Path[dirs.size()]); } else { List finalPaths = new ArrayList<>(dirs.size()); for (Path dir : dirs) { - processForWriteIds(dir, conf, validTxnList, finalPaths); + processForWriteIds(dir, conf, validWriteIdList, finalPaths); } if (finalPaths.isEmpty()) { LOG.warn("No valid inputs found in " + dirs); @@ -520,7 +521,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } private static void processForWriteIds(Path dir, JobConf conf, - ValidTxnList validTxnList, List finalPaths) throws IOException { + ValidWriteIdList validWriteIdList, List finalPaths) throws IOException { FileSystem fs = dir.getFileSystem(conf); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("Checking " + dir + " (root) for inputs"); @@ -546,10 +547,10 @@ private static void processForWriteIds(Path dir, JobConf conf, } if (!file.isDirectory()) { Utilities.FILE_OP_LOGGER.warn("Ignoring a file not in MM directory " + path); - } else if (JavaUtils.extractTxnId(path) == null) { + } else if (JavaUtils.extractWriteId(path) == null) { subdirs.add(path); } else if (!hadAcidState) { - AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validTxnList, Ref.from(false), true, null); + AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validWriteIdList, Ref.from(false), true, null); hadAcidState = true; // Find the base, created for IOW. 
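// Illustrative sketch, not part of the patch: how downstream code might combine the new
// AcidUtils helpers with JavaUtils.extractWriteId. It assumes the job conf already carries
// the serialized per-table write id lists; the class name, the table name "db.tbl" and the
// delta directory name used in the usage note are made-up examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.ql.io.AcidUtils;

class WriteIdLookupSketch {
  static boolean deltaIsReadable(Configuration conf, Path deltaDir) {
    // Resolve the write id list published for this table and re-publish it under the
    // per-scan key, mirroring what HiveInputFormat does above.
    ValidWriteIdList writeIds = AcidUtils.getTableValidWriteIdList(
        conf, AcidUtils.getFullTableName("db", "tbl"));
    AcidUtils.setValidWriteIdList(conf, writeIds);
    // extractWriteId parses names like delta_0000012_0000012_0000 or base_0000022 and
    // returns null for anything else.
    Long writeId = JavaUtils.extractWriteId(deltaDir);
    return writeId != null && writeIds.isWriteIdValid(writeId);
  }
}
// Usage, e.g.: deltaIsReadable(conf, new Path(partitionDir, "delta_0000012_0000012_0000"))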
@@ -861,6 +862,8 @@ protected void pushProjectionsAndFilters(JobConf jobConf, Class inputFormatClass AcidUtils.setAcidTableScan(job, ts.getConf().isAcidTable()); AcidUtils.setAcidOperationalProperties(job, ts.getConf().getAcidOperationalProperties()); + AcidUtils.setValidWriteIdList(job, ts.getConf()); + } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java index 0c37203..1f673da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java @@ -88,7 +88,7 @@ public static void toArray(RecordIdentifier ri, Object[] struct) { Arrays.fill(struct, null); return; } - struct[Field.transactionId.ordinal()] = ri.getTransactionId(); + struct[Field.transactionId.ordinal()] = ri.getWriteId(); struct[Field.bucketId.ordinal()] = ri.getBucketProperty(); struct[Field.rowId.ordinal()] = ri.getRowId(); } @@ -101,20 +101,20 @@ public static void toArray(RecordIdentifier ri, Object[] struct) { public RecordIdentifier() { } - public RecordIdentifier(long transactionId, int bucket, long rowId) { - this.transactionId = transactionId; + public RecordIdentifier(long writeId, int bucket, long rowId) { + this.transactionId = writeId; this.bucketId = bucket; this.rowId = rowId; } /** * Set the identifier. - * @param transactionId the transaction id + * @param writeId the write id * @param bucketId the bucket id * @param rowId the row id */ - public void setValues(long transactionId, int bucketId, long rowId) { - this.transactionId = transactionId; + public void setValues(long writeId, int bucketId, long rowId) { + this.transactionId = writeId; this.bucketId = bucketId; this.rowId = rowId; } @@ -134,10 +134,10 @@ public void setRowId(long rowId) { } /** - * What was the original transaction id for the last row? - * @return the transaction id + * What was the original write id for the last row? + * @return the write id */ - public long getTransactionId() { + public long getWriteId() { return transactionId; } @@ -223,7 +223,7 @@ public String toString() { BucketCodec.determineVersion(bucketId); String s = "(" + codec.getVersion() + "." + codec.decodeWriterId(bucketId) + "." + codec.decodeStatementId(bucketId) + ")"; - return "{originalTxn: " + transactionId + ", " + bucketToString() + ", row: " + getRowId() +"}"; + return "{originalWriteId: " + transactionId + ", " + bucketToString() + ", row: " + getRowId() +"}"; } protected String bucketToString() { BucketCodec codec = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java index 36111f0..0aed172 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordUpdater.java @@ -30,27 +30,26 @@ /** * Insert a new record into the table. - * @param currentTransaction the transaction id of the current transaction. + * @param currentWriteId the table write id of the current transaction. * @param row the row of data to insert * @throws IOException */ - void insert(long currentTransaction, - Object row) throws IOException; + void insert(long currentWriteId, Object row) throws IOException; /** * Update an old record with a new set of values. 
- * @param currentTransaction the current transaction id + * @param currentWriteId the current write id * @param row the new values for the row * @throws IOException */ - void update(long currentTransaction, Object row) throws IOException; + void update(long currentWriteId, Object row) throws IOException; /** * Delete a row from the table. - * @param currentTransaction the current transaction id + * @param currentWriteId the current write id * @throws IOException */ - void delete(long currentTransaction, Object row) throws IOException; + void delete(long currentWriteId, Object row) throws IOException; /** * Flush the current set of rows to the underlying file system, so that diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index ff2cc04..eb30626 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -49,8 +49,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Metastore; @@ -570,7 +570,7 @@ public boolean validateInput(FileSystem fs, HiveConf conf, private final boolean forceThreadpool; private final AtomicInteger cacheHitCounter = new AtomicInteger(0); private final AtomicInteger numFilesCounter = new AtomicInteger(0); - private final ValidTxnList transactionList; + private final ValidWriteIdList writeIdList; private SplitStrategyKind splitStrategyKind; private final SearchArgument sarg; private final AcidOperationalProperties acidOperationalProperties; @@ -652,8 +652,8 @@ public boolean validateInput(FileSystem fs, HiveConf conf, footerCache = useExternalCache ? metaCache : localCache; } } - String value = conf.get(ValidTxnList.VALID_TXNS_KEY); - transactionList = value == null ? new ValidReadTxnList() : new ValidReadTxnList(value); + String value = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + writeIdList = value == null ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(value); // Determine the transactional_properties of the table from the job conf stored in context. // The table properties are copied to job conf at HiveInputFormat::addSplitsForGroup(), @@ -935,10 +935,6 @@ public Void run() throws Exception { } } - - - - private void runGetSplitsSync(List>> splitFutures, List splits, UserGroupInformation ugi) throws IOException { UserGroupInformation tpUgi = ugi == null ? UserGroupInformation.getCurrentUser() : ugi; @@ -1095,8 +1091,8 @@ public String toString() { /** * For plain or acid tables this is the root of the partition (or table if not partitioned). * For MM table this is delta/ or base/ dir. In MM case applying of the ValidTxnList that - * {@link AcidUtils#getAcidState(Path, Configuration, ValidTxnList)} normally does has already - * been done in {@link HiveInputFormat#processPathsForMmRead(List, JobConf, ValidTxnList)}. + * {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} normally does has already + * been done in {@link HiveInputFormat#processPathsForMmRead(List, JobConf, ValidWriteIdList)}. 
*/ private final Path dir; private final Ref useFileIds; @@ -1136,7 +1132,7 @@ public AcidDirInfo run() throws Exception { private AcidDirInfo callInternal() throws IOException { //todo: shouldn't ignoreEmptyFiles be set based on ExecutionEngine? AcidUtils.Directory dirInfo = AcidUtils.getAcidState(dir, context.conf, - context.transactionList, useFileIds, true, null); + context.writeIdList, useFileIds, true, null); // find the base files (original or new style) List baseFiles = new ArrayList<>(); if (dirInfo.getBaseDirectory() == null) { @@ -1175,8 +1171,8 @@ private AcidDirInfo callInternal() throws IOException { AcidUtils.AcidBaseFileType.ORIGINAL_BASE : AcidUtils.AcidBaseFileType.ACID_SCHEMA; PathFilter bucketFilter = parsedDelta.isRawFormat() ? AcidUtils.originalBucketFilter : AcidUtils.bucketFileFilter; - if(parsedDelta.isRawFormat() && parsedDelta.getMinTransaction() != - parsedDelta.getMaxTransaction()) { + if(parsedDelta.isRawFormat() && parsedDelta.getMinWriteId() != + parsedDelta.getMaxWriteId()) { //delta/ with files in raw format are a result of Load Data (as opposed to compaction //or streaming ingest so must have interval length == 1. throw new IllegalStateException("Delta in " + AcidUtils.AcidBaseFileType.ORIGINAL_BASE @@ -2012,12 +2008,12 @@ public float getProgress() throws IOException { final Reader.Options readOptions = OrcInputFormat.createOptionsForReader(conf); readOptions.range(split.getStart(), split.getLength()); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - ValidTxnList validTxnList = txnString == null ? new ValidReadTxnList() : - new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + ValidWriteIdList validWriteIdList = txnString == null ? new ValidReaderWriteIdList() : + new ValidReaderWriteIdList(txnString); final OrcRawRecordMerger records = new OrcRawRecordMerger(conf, true, reader, split.isOriginal(), bucket, - validTxnList, readOptions, deltas, mergerOptions); + validWriteIdList, readOptions, deltas, mergerOptions); return new RowReader() { OrcStruct innerRecord = records.createValue(); @@ -2299,7 +2295,7 @@ private static boolean isStripeSatisfyPredicate( public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory ) throws IOException { @@ -2323,7 +2319,7 @@ private static boolean isStripeSatisfyPredicate( mergerOptions.rootPath(deltaDirectory[0].getParent()); } return new OrcRawRecordMerger(conf, collapseEvents, null, isOriginal, - bucket, validTxnList, new Reader.Options(), deltaDirectory, mergerOptions); + bucket, validWriteIdList, new Reader.Options(), deltaDirectory, mergerOptions); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java index f1f638d..57e005d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java @@ -204,20 +204,20 @@ private DummyOrcRecordUpdater(Path path, Options options) { } @Override - public void insert(long currentTransaction, Object row) throws IOException { - out.println("insert " + path + " currTxn: " + currentTransaction + + public void insert(long currentWriteId, Object row) throws IOException { + out.println("insert " + path + " currWriteId: " + currentWriteId + " obj: " + stringifyObject(row, inspector)); } @Override - public void 
update(long currentTransaction, Object row) throws IOException { - out.println("update " + path + " currTxn: " + currentTransaction + + public void update(long currentWriteId, Object row) throws IOException { + out.println("update " + path + " currWriteId: " + currentWriteId + " obj: " + stringifyObject(row, inspector)); } @Override - public void delete(long currentTransaction, Object row) throws IOException { - out.println("delete " + path + " currTxn: " + currentTransaction + " obj: " + row); + public void delete(long currentWriteId, Object row) throws IOException { + out.println("delete " + path + " currWriteId: " + currentWriteId + " obj: " + row); } @Override @@ -307,7 +307,7 @@ public void write(Writable w) throws IOException { watcher.addKey( ((IntWritable) orc.getFieldValue(OrcRecordUpdater.OPERATION)).get(), ((LongWritable) - orc.getFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION)).get(), + orc.getFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID)).get(), ((IntWritable) orc.getFieldValue(OrcRecordUpdater.BUCKET)).get(), ((LongWritable) orc.getFieldValue(OrcRecordUpdater.ROW_ID)).get()); writer.addRow(w); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java index 779da4f..7c99964 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java @@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RecordIdentifier; @@ -60,7 +60,7 @@ private final ObjectInspector objectInspector; private final long offset; private final long length; - private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private final int columns; private final ReaderKey prevKey = new ReaderKey(); // this is the key less than the lowest key we need to process @@ -70,15 +70,15 @@ // an extra value so that we can return it while reading ahead private OrcStruct extraValue; /** - * A RecordIdentifier extended with the current transaction id. This is the - * key of our merge sort with the originalTransaction, bucket, and rowId - * ascending and the currentTransaction, statementId descending. This means that if the + * A RecordIdentifier extended with the current write id. This is the + * key of our merge sort with the originalWriteId, bucket, and rowId + * ascending and the currentWriteId, statementId descending. This means that if the * reader is collapsing events to just the last update, just the first * instance of each record is required. */ @VisibleForTesting public final static class ReaderKey extends RecordIdentifier{ - private long currentTransactionId; + private long currentWriteId; /** * This is the value from delta file name which may be different from value encode in * {@link RecordIdentifier#getBucketProperty()} in case of Update/Delete. @@ -86,54 +86,54 @@ * or delete event. For Acid 2.0 + multi-stmt txn, it must be a delete event. 
* No 2 Insert events from can ever agree on {@link RecordIdentifier} */ - private int statementId;//sort on this descending, like currentTransactionId + private int statementId;//sort on this descending, like currentWriteId ReaderKey() { this(-1, -1, -1, -1, 0); } - ReaderKey(long originalTransaction, int bucket, long rowId, - long currentTransactionId) { - this(originalTransaction, bucket, rowId, currentTransactionId, 0); + ReaderKey(long originalWriteId, int bucket, long rowId, + long currentWriteId) { + this(originalWriteId, bucket, rowId, currentWriteId, 0); } /** * @param statementId - set this to 0 if N/A */ - public ReaderKey(long originalTransaction, int bucket, long rowId, - long currentTransactionId, int statementId) { - super(originalTransaction, bucket, rowId); - this.currentTransactionId = currentTransactionId; + public ReaderKey(long originalWriteId, int bucket, long rowId, + long currentWriteId, int statementId) { + super(originalWriteId, bucket, rowId); + this.currentWriteId = currentWriteId; this.statementId = statementId; } @Override public void set(RecordIdentifier other) { super.set(other); - currentTransactionId = ((ReaderKey) other).currentTransactionId; + currentWriteId = ((ReaderKey) other).currentWriteId; statementId = ((ReaderKey) other).statementId; } - public void setValues(long originalTransactionId, + public void setValues(long originalWriteId, int bucket, long rowId, - long currentTransactionId, + long currentWriteId, int statementId) { - setValues(originalTransactionId, bucket, rowId); - this.currentTransactionId = currentTransactionId; + setValues(originalWriteId, bucket, rowId); + this.currentWriteId = currentWriteId; this.statementId = statementId; } @Override public boolean equals(Object other) { return super.equals(other) && - currentTransactionId == ((ReaderKey) other).currentTransactionId + currentWriteId == ((ReaderKey) other).currentWriteId && statementId == ((ReaderKey) other).statementId//consistent with compareTo() ; } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + (int)(currentTransactionId ^ (currentTransactionId >>> 32)); + result = 31 * result + (int)(currentWriteId ^ (currentWriteId >>> 32)); result = 31 * result + statementId; return result; } @@ -145,8 +145,8 @@ public int compareTo(RecordIdentifier other) { if (sup == 0) { if (other.getClass() == ReaderKey.class) { ReaderKey oth = (ReaderKey) other; - if (currentTransactionId != oth.currentTransactionId) { - return currentTransactionId < oth.currentTransactionId ? +1 : -1; + if (currentWriteId != oth.currentWriteId) { + return currentWriteId < oth.currentWriteId ? +1 : -1; } if(statementId != oth.statementId) { return statementId < oth.statementId ? +1 : -1; @@ -162,15 +162,15 @@ public int compareTo(RecordIdentifier other) { * This means 1 txn modified the same row more than once */ private boolean isSameRow(ReaderKey other) { - return compareRow(other) == 0 && currentTransactionId == other.currentTransactionId; + return compareRow(other) == 0 && currentWriteId == other.currentWriteId; } - long getCurrentTransactionId() { - return currentTransactionId; + long getCurrentWriteId() { + return currentWriteId; } /** - * Compare rows without considering the currentTransactionId. + * Compare rows without considering the currentWriteId. 
* @param other the value to compare to * @return -1, 0, +1 */ @@ -180,9 +180,9 @@ int compareRow(RecordIdentifier other) { @Override public String toString() { - return "{originalTxn: " + getTransactionId() + ", " + - bucketToString() + ", row: " + getRowId() + ", currentTxn: " + - currentTransactionId + ", statementId: "+ statementId + "}"; + return "{originalWriteId: " + getWriteId() + ", " + + bucketToString() + ", row: " + getRowId() + ", currentWriteId " + + currentWriteId + ", statementId: "+ statementId + "}"; } } interface ReaderPair { @@ -389,9 +389,9 @@ final boolean nextFromCurrentFile(OrcStruct next) throws IOException { IntWritable operation = new IntWritable(OrcRecordUpdater.INSERT_OPERATION); nextRecord().setFieldValue(OrcRecordUpdater.OPERATION, operation); - nextRecord().setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + nextRecord().setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(transactionId)); - nextRecord().setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + nextRecord().setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(transactionId)); nextRecord().setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(bucketProperty)); @@ -403,11 +403,11 @@ final boolean nextFromCurrentFile(OrcStruct next) throws IOException { nextRecord = next; ((IntWritable) next.getFieldValue(OrcRecordUpdater.OPERATION)) .set(OrcRecordUpdater.INSERT_OPERATION); - ((LongWritable) next.getFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION)) + ((LongWritable) next.getFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID)) .set(transactionId); ((IntWritable) next.getFieldValue(OrcRecordUpdater.BUCKET)) .set(bucketProperty); - ((LongWritable) next.getFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION)) + ((LongWritable) next.getFieldValue(OrcRecordUpdater.CURRENT_WRITEID)) .set(transactionId); ((LongWritable) next.getFieldValue(OrcRecordUpdater.ROW_ID)) .set(nextRowId); @@ -445,7 +445,7 @@ static int encodeBucketId(Configuration conf, int bucketId, int statementId) { OriginalReaderPairToRead(ReaderKey key, Reader reader, int bucketId, final RecordIdentifier minKey, final RecordIdentifier maxKey, Reader.Options options, Options mergerOptions, Configuration conf, - ValidTxnList validTxnList, int statementId) throws IOException { + ValidWriteIdList validWriteIdList, int statementId) throws IOException { super(key, bucketId, conf, mergerOptions, statementId); this.reader = reader; assert !mergerOptions.isCompacting(); @@ -473,7 +473,7 @@ static int encodeBucketId(Configuration conf, int bucketId, int statementId) { */ //the split is from something other than the 1st file of the logical bucket - compute offset AcidUtils.Directory directoryState = AcidUtils.getAcidState(mergerOptions.getRootPath(), - conf, validTxnList, false, true); + conf, validWriteIdList, false, true); for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) { AcidOutputFormat.Options bucketOptions = AcidUtils.parseBaseOrDeltaBucketFilename(f.getFileStatus().getPath(), conf); @@ -577,7 +577,7 @@ public void next(OrcStruct next) throws IOException { OriginalReaderPairToCompact(ReaderKey key, int bucketId, Reader.Options options, Options mergerOptions, Configuration conf, - ValidTxnList validTxnList, int statementId) throws IOException { + ValidWriteIdList validWriteIdList, int statementId) throws IOException { super(key, bucketId, conf, mergerOptions, statementId); assert mergerOptions.isCompacting() : "Should only be used for Compaction"; this.conf = conf; @@ -588,7 +588,7 @@ public void 
next(OrcStruct next) throws IOException { assert options.getOffset() == 0; assert options.getMaxOffset() == Long.MAX_VALUE; AcidUtils.Directory directoryState = AcidUtils.getAcidState( - mergerOptions.getRootPath(), conf, validTxnList, false, true); + mergerOptions.getRootPath(), conf, validWriteIdList, false, true); /** * Note that for reading base_x/ or delta_x_x/ with non-acid schema, * {@link Options#getRootPath()} is set to base_x/ or delta_x_x/ which causes all it's @@ -714,7 +714,7 @@ private KeyInterval discoverOriginalKeyBounds(Reader reader, int bucket, boolean isTail = true; RecordIdentifier minKey = null; RecordIdentifier maxKey = null; - TransactionMetaData tfp = TransactionMetaData.findTransactionIDForSynthetcRowIDs( + TransactionMetaData tfp = TransactionMetaData.findWriteIDForSynthetcRowIDs( mergerOptions.getBucketPath(), mergerOptions.getRootPath(), conf); int bucketProperty = encodeBucketId(conf, bucket, tfp.statementId); /** @@ -939,13 +939,13 @@ public Options clone() { Reader reader, boolean isOriginal, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Reader.Options options, Path[] deltaDirectory, Options mergerOptions) throws IOException { this.collapse = collapseEvents; this.offset = options.getOffset(); this.length = options.getLength(); - this.validTxnList = validTxnList; + this.validWriteIdList = validWriteIdList; /** * @since Hive 3.0 * With split update (HIVE-14035) we have base/, delta/ and delete_delta/ - the latter only @@ -1028,7 +1028,7 @@ public Options clone() { AcidUtils.parseBase(mergerOptions.getBaseDir()), mergerOptions.getBaseDir()); } pair = new OriginalReaderPairToCompact(key, bucket, options, readerPairOptions, - conf, validTxnList, + conf, validWriteIdList, 0);//0 since base_x doesn't have a suffix (neither does pre acid write) } else { assert mergerOptions.getBucketPath() != null : " since this is not compaction: " @@ -1036,14 +1036,14 @@ public Options clone() { //if here it's a non-acid schema file - check if from before table was marked transactional //or in base_x/delta_x_x from Load Data Options readerPairOptions = mergerOptions; - TransactionMetaData tfp = TransactionMetaData.findTransactionIDForSynthetcRowIDs( + TransactionMetaData tfp = TransactionMetaData.findWriteIDForSynthetcRowIDs( mergerOptions.getBucketPath(), mergerOptions.getRootPath(), conf); - if(tfp.syntheticTransactionId > 0) { + if(tfp.syntheticWriteId > 0) { readerPairOptions = modifyForNonAcidSchemaRead(mergerOptions, - tfp.syntheticTransactionId, tfp.folder); + tfp.syntheticWriteId, tfp.folder); } pair = new OriginalReaderPairToRead(key, reader, bucket, keyInterval.getMinKey(), - keyInterval.getMaxKey(), options, readerPairOptions, conf, validTxnList, tfp.statementId); + keyInterval.getMaxKey(), options, readerPairOptions, conf, validWriteIdList, tfp.statementId); } } else { if(mergerOptions.isCompacting()) { @@ -1101,10 +1101,10 @@ public Options clone() { assert mergerOptions.isCompacting() : "during regular read anything which is not a" + " delete_delta is treated like base: " + delta; Options rawCompactOptions = modifyForNonAcidSchemaRead(mergerOptions, - deltaDir.getMinTransaction(), delta); + deltaDir.getMinWriteId(), delta); //this will also handle copy_N files if any ReaderPair deltaPair = new OriginalReaderPairToCompact(key, bucket, options, - rawCompactOptions, conf, validTxnList, deltaDir.getStatementId()); + rawCompactOptions, conf, validWriteIdList, deltaDir.getStatementId()); if (deltaPair.nextRecord() != null) { readers.put(key, 
deltaPair); } @@ -1170,24 +1170,24 @@ public Options clone() { * type files into a base_x/ or delta_x_x. The data in these are then assigned ROW_IDs at read * time and made permanent at compaction time. This is identical to how 'original' files (i.e. * those that existed in the table before it was converted to an Acid table) except that the - * transaction ID to use in the ROW_ID should be that of the transaction that ran the Load Data. + * write ID to use in the ROW_ID should be that of the transaction that ran the Load Data. */ static final class TransactionMetaData { - final long syntheticTransactionId; + final long syntheticWriteId; /** * folder which determines the transaction id to use in synthetic ROW_IDs */ final Path folder; final int statementId; - TransactionMetaData(long syntheticTransactionId, Path folder) { - this(syntheticTransactionId, folder, 0); + TransactionMetaData(long syntheticWriteId, Path folder) { + this(syntheticWriteId, folder, 0); } - TransactionMetaData(long syntheticTransactionId, Path folder, int statementId) { - this.syntheticTransactionId = syntheticTransactionId; + TransactionMetaData(long syntheticWriteId, Path folder, int statementId) { + this.syntheticWriteId = syntheticWriteId; this.folder = folder; this.statementId = statementId; } - static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Path rootPath, + static TransactionMetaData findWriteIDForSynthetcRowIDs(Path splitPath, Path rootPath, Configuration conf) throws IOException { Path parent = splitPath.getParent(); if(rootPath.equals(parent)) { @@ -1205,10 +1205,10 @@ static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Pa else { AcidUtils.ParsedDelta pd = AcidUtils.parsedDelta(parent, AcidUtils.DELTA_PREFIX, parent.getFileSystem(conf)); - assert pd.getMinTransaction() == pd.getMaxTransaction() : + assert pd.getMinWriteId() == pd.getMaxWriteId() : "This a delta with raw non acid schema, must be result of single write, no compaction: " + splitPath; - return new TransactionMetaData(pd.getMinTransaction(), parent, pd.getStatementId()); + return new TransactionMetaData(pd.getMinWriteId(), parent, pd.getStatementId()); } } parent = parent.getParent(); @@ -1227,7 +1227,7 @@ static TransactionMetaData findTransactionIDForSynthetcRowIDs(Path splitPath, Pa /** * This is done to read non-acid schema files ("original") located in base_x/ or delta_x_x/ which * happens as a result of Load Data statement. 
Setting {@code rootPath} to base_x/ or delta_x_x - * causes {@link AcidUtils#getAcidState(Path, Configuration, ValidTxnList)} in subsequent + * causes {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} in subsequent * {@link OriginalReaderPair} object to return the files in this dir * in {@link AcidUtils.Directory#getOriginalFiles()} * @return modified clone of {@code baseOptions} @@ -1350,8 +1350,8 @@ public boolean next(RecordIdentifier recordIdentifier, } // if this transaction isn't ok, skip over it - if (!validTxnList.isTxnValid( - ((ReaderKey) recordIdentifier).getCurrentTransactionId())) { + if (!validWriteIdList.isWriteIdValid( + ((ReaderKey) recordIdentifier).getCurrentWriteId())) { continue; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index b90ce6e..2e4db22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -55,7 +55,7 @@ * A RecordUpdater where the files are stored as ORC. * A note on various record structures: the {@code row} coming in (as in {@link #insert(long, Object)} * for example), is a struct like but what is written to the file - * * is > (see {@link #createEventSchema(ObjectInspector)}) + * * is > (see {@link #createEventSchema(ObjectInspector)}) * So there are OIs here to make the translation. */ public class OrcRecordUpdater implements RecordUpdater { @@ -72,10 +72,10 @@ final static int DELETE_OPERATION = 2; //column indexes of corresponding data in storage layer final static int OPERATION = 0; - final static int ORIGINAL_TRANSACTION = 1; + final static int ORIGINAL_WRITEID = 1; final static int BUCKET = 2; final static int ROW_ID = 3; - final static int CURRENT_TRANSACTION = 4; + final static int CURRENT_WRITEID = 4; final static int ROW = 5; /** * total number of fields (above) @@ -100,8 +100,8 @@ private final FSDataOutputStream flushLengths; private final OrcStruct item; private final IntWritable operation = new IntWritable(); - private final LongWritable currentTransaction = new LongWritable(-1); - private final LongWritable originalTransaction = new LongWritable(-1); + private final LongWritable currentWriteId = new LongWritable(-1); + private final LongWritable originalWriteId = new LongWritable(-1); private final IntWritable bucket = new IntWritable(); private final LongWritable rowId = new LongWritable(); private long insertedRows = 0; @@ -112,12 +112,12 @@ private KeyIndexBuilder deleteEventIndexBuilder; private StructField recIdField = null; // field to look for the record identifier in private StructField rowIdField = null; // field inside recId to look for row id in - private StructField originalTxnField = null; // field inside recId to look for original txn in + private StructField originalWriteIdField = null; // field inside recId to look for original write id in private StructField bucketField = null; // field inside recId to look for bucket in private StructObjectInspector rowInspector; // OI for the original row private StructObjectInspector recIdInspector; // OI for the record identifier struct private LongObjectInspector rowIdInspector; // OI for the long row id inside the recordIdentifier - private LongObjectInspector origTxnInspector; // OI for the original txn inside the record + private LongObjectInspector origWriteIdInspector; // OI for the original write id inside the record // identifer private IntObjectInspector 
bucketInspector; @@ -126,11 +126,11 @@ static int getOperation(OrcStruct struct) { } static long getCurrentTransaction(OrcStruct struct) { - return ((LongWritable) struct.getFieldValue(CURRENT_TRANSACTION)).get(); + return ((LongWritable) struct.getFieldValue(CURRENT_WRITEID)).get(); } static long getOriginalTransaction(OrcStruct struct) { - return ((LongWritable) struct.getFieldValue(ORIGINAL_TRANSACTION)).get(); + return ((LongWritable) struct.getFieldValue(ORIGINAL_WRITEID)).get(); } static int getBucket(OrcStruct struct) { @@ -184,15 +184,13 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { fields.add(new OrcStruct.Field("operation", PrimitiveObjectInspectorFactory.writableIntObjectInspector, OPERATION)); fields.add(new OrcStruct.Field("originalTransaction", - PrimitiveObjectInspectorFactory.writableLongObjectInspector, - ORIGINAL_TRANSACTION)); + PrimitiveObjectInspectorFactory.writableLongObjectInspector, ORIGINAL_WRITEID)); fields.add(new OrcStruct.Field("bucket", PrimitiveObjectInspectorFactory.writableIntObjectInspector, BUCKET)); fields.add(new OrcStruct.Field("rowId", PrimitiveObjectInspectorFactory.writableLongObjectInspector, ROW_ID)); fields.add(new OrcStruct.Field("currentTransaction", - PrimitiveObjectInspectorFactory.writableLongObjectInspector, - CURRENT_TRANSACTION)); + PrimitiveObjectInspectorFactory.writableLongObjectInspector, CURRENT_WRITEID)); fields.add(new OrcStruct.Field("row", rowInspector, ROW)); return new OrcStruct.OrcStructInspector(fields); } @@ -246,7 +244,7 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { } } } - if (options.getMinimumTransactionId() != options.getMaximumTransactionId() + if (options.getMinimumWriteId() != options.getMaximumWriteId() && !options.isWritingBase()){ //throw if file already exists as that should never happen flushLengths = fs.create(OrcAcidUtils.getSideFile(this.path), false, 8, @@ -316,8 +314,8 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { options.getRecordIdColumn()))); item = new OrcStruct(FIELDS); item.setFieldValue(OPERATION, operation); - item.setFieldValue(CURRENT_TRANSACTION, currentTransaction); - item.setFieldValue(ORIGINAL_TRANSACTION, originalTransaction); + item.setFieldValue(CURRENT_WRITEID, currentWriteId); + item.setFieldValue(ORIGINAL_WRITEID, originalWriteId); item.setFieldValue(BUCKET, bucket); item.setFieldValue(ROW_ID, rowId); } @@ -342,9 +340,9 @@ private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) { List fields = ((StructObjectInspector) recIdField.getFieldObjectInspector()).getAllStructFieldRefs(); // Go by position, not field name, as field names aren't guaranteed. 
The order of fields - // in RecordIdentifier is transactionId, bucketId, rowId - originalTxnField = fields.get(0); - origTxnInspector = (LongObjectInspector)originalTxnField.getFieldObjectInspector(); + // in RecordIdentifier is writeId, bucketId, rowId + originalWriteIdField = fields.get(0); + origWriteIdInspector = (LongObjectInspector)originalWriteIdField.getFieldObjectInspector(); bucketField = fields.get(1); bucketInspector = (IntObjectInspector) bucketField.getFieldObjectInspector(); rowIdField = fields.get(2); @@ -361,27 +359,27 @@ private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) { * thus even for unbucketed tables, the N in bucket_N file name matches writerId/bucketId even for * late split */ - private void addSimpleEvent(int operation, long currentTransaction, long rowId, Object row) + private void addSimpleEvent(int operation, long currentWriteId, long rowId, Object row) throws IOException { this.operation.set(operation); - this.currentTransaction.set(currentTransaction); + this.currentWriteId.set(currentWriteId); Integer currentBucket = null; - // If this is an insert, originalTransaction should be set to this transaction. If not, + // If this is an insert, originalWriteId should be set to this transaction. If not, // it will be reset by the following if anyway. - long originalTransaction = currentTransaction; + long originalWriteId = currentWriteId; if (operation == DELETE_OPERATION || operation == UPDATE_OPERATION) { Object rowIdValue = rowInspector.getStructFieldData(row, recIdField); - originalTransaction = origTxnInspector.get( - recIdInspector.getStructFieldData(rowIdValue, originalTxnField)); + originalWriteId = origWriteIdInspector.get( + recIdInspector.getStructFieldData(rowIdValue, originalWriteIdField)); rowId = rowIdInspector.get(recIdInspector.getStructFieldData(rowIdValue, rowIdField)); currentBucket = setBucket(bucketInspector.get( recIdInspector.getStructFieldData(rowIdValue, bucketField)), operation); } this.rowId.set(rowId); - this.originalTransaction.set(originalTransaction); + this.originalWriteId.set(originalWriteId); item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation)); item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row)); - indexBuilder.addKey(operation, originalTransaction, bucket.get(), rowId); + indexBuilder.addKey(operation, originalWriteId, bucket.get(), rowId); if (writer == null) { writer = OrcFile.createWriter(path, writerOptions); } @@ -389,18 +387,18 @@ private void addSimpleEvent(int operation, long currentTransaction, long rowId, restoreBucket(currentBucket, operation); } - private void addSplitUpdateEvent(int operation, long currentTransaction, long rowId, Object row) + private void addSplitUpdateEvent(int operation, long currentWriteId, long rowId, Object row) throws IOException { if (operation == INSERT_OPERATION) { // Just insert the record in the usual way, i.e., default to the simple behavior. 
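// (Only updates and deletes continue past this if block to the delete-delta writer below;
// inserts take the simple event path and return here, with originalWriteId set equal to currentWriteId.)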
- addSimpleEvent(operation, currentTransaction, rowId, row); + addSimpleEvent(operation, currentWriteId, rowId, row); return; } this.operation.set(operation); - this.currentTransaction.set(currentTransaction); + this.currentWriteId.set(currentWriteId); Object rowValue = rowInspector.getStructFieldData(row, recIdField); - long originalTransaction = origTxnInspector.get( - recIdInspector.getStructFieldData(rowValue, originalTxnField)); + long originalWriteId = origWriteIdInspector.get( + recIdInspector.getStructFieldData(rowValue, originalWriteIdField)); rowId = rowIdInspector.get( recIdInspector.getStructFieldData(rowValue, rowIdField)); Integer currentBucket = null; @@ -423,54 +421,54 @@ private void addSplitUpdateEvent(int operation, long currentTransaction, long ro // A delete/update generates a delete event for the original row. this.rowId.set(rowId); - this.originalTransaction.set(originalTransaction); + this.originalWriteId.set(originalWriteId); item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(DELETE_OPERATION)); item.setFieldValue(OrcRecordUpdater.ROW, null); // ROW is null for delete events. - deleteEventIndexBuilder.addKey(DELETE_OPERATION, originalTransaction, bucket.get(), rowId); + deleteEventIndexBuilder.addKey(DELETE_OPERATION, originalWriteId, bucket.get(), rowId); deleteEventWriter.addRow(item); restoreBucket(currentBucket, operation); } if (operation == UPDATE_OPERATION) { // A new row is also inserted in the usual delta file for an update event. - addSimpleEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSimpleEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } } @Override - public void insert(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void insert(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSplitUpdateEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } else { - addSimpleEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row); + addSimpleEvent(INSERT_OPERATION, currentWriteId, insertedRows++, row); } rowCountDelta++; } @Override - public void update(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void update(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(UPDATE_OPERATION, currentTransaction, -1L, row); + addSplitUpdateEvent(UPDATE_OPERATION, currentWriteId, -1L, row); } else { - addSimpleEvent(UPDATE_OPERATION, currentTransaction, -1L, row); + addSimpleEvent(UPDATE_OPERATION, currentWriteId, -1L, row); } } @Override - public void delete(long currentTransaction, Object row) throws IOException { - if (this.currentTransaction.get() != currentTransaction) { + public void delete(long currentWriteId, Object row) throws IOException { + if (this.currentWriteId.get() != currentWriteId) { insertedRows = 0; } if (acidOperationalProperties.isSplitUpdate()) { - addSplitUpdateEvent(DELETE_OPERATION, currentTransaction, -1L, row); + addSplitUpdateEvent(DELETE_OPERATION, currentWriteId, -1L, row); } else { - addSimpleEvent(DELETE_OPERATION, currentTransaction, -1L, row); + 
addSimpleEvent(DELETE_OPERATION, currentWriteId, -1L, row); } rowCountDelta--; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java index da20004..cbd1fa9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java @@ -27,8 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -72,7 +72,7 @@ protected float progress = 0.0f; protected Object[] partitionValues; private boolean addPartitionCols = true; - private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private final DeleteEventRegistry deleteEventRegistry; /** * {@link RecordIdentifier}/{@link VirtualColumn#ROWID} information @@ -183,8 +183,8 @@ private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporte partitionValues = null; } - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); // Clone readerOptions for deleteEvents. Reader.Options deleteEventReaderOptions = readerOptions.clone(); @@ -214,7 +214,7 @@ private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporte } rowIdProjected = areRowIdsProjected(rbCtx); rootPath = orcSplit.getRootDir(); - syntheticProps = computeOffsetAndBucket(orcSplit, conf, validTxnList); + syntheticProps = computeOffsetAndBucket(orcSplit, conf, validWriteIdList); } /** @@ -242,7 +242,7 @@ private OffsetAndBucketProperty(long rowIdOffset, int bucketProperty, long synth * before/during split computation and passing the info in the split. 
(HIVE-17917) */ private OffsetAndBucketProperty computeOffsetAndBucket( - OrcSplit split, JobConf conf,ValidTxnList validTxnList) throws IOException { + OrcSplit split, JobConf conf,ValidWriteIdList validWriteIdList) throws IOException { if(!needSyntheticRowIds(split.isOriginal(), !deleteEventRegistry.isEmpty(), rowIdProjected)) { if(split.isOriginal()) { /** @@ -252,22 +252,22 @@ private OffsetAndBucketProperty computeOffsetAndBucket( * filter out base/delta files but this makes fewer dependencies) */ OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = - OrcRawRecordMerger.TransactionMetaData.findTransactionIDForSynthetcRowIDs(split.getPath(), + OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(split.getPath(), split.getRootDir(), conf); return new OffsetAndBucketProperty(-1,-1, - syntheticTxnInfo.syntheticTransactionId); + syntheticTxnInfo.syntheticWriteId); } return null; } long rowIdOffset = 0; OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = - OrcRawRecordMerger.TransactionMetaData.findTransactionIDForSynthetcRowIDs(split.getPath(), + OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(split.getPath(), split.getRootDir(), conf); int bucketId = AcidUtils.parseBaseOrDeltaBucketFilename(split.getPath(), conf).getBucketId(); int bucketProperty = BucketCodec.V1.encode(new AcidOutputFormat.Options(conf) .statementId(syntheticTxnInfo.statementId).bucket(bucketId)); AcidUtils.Directory directoryState = AcidUtils.getAcidState( syntheticTxnInfo.folder, conf, - validTxnList, false, true); + validWriteIdList, false, true); for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) { AcidOutputFormat.Options bucketOptions = AcidUtils.parseBaseOrDeltaBucketFilename(f.getFileStatus().getPath(), conf); @@ -283,7 +283,7 @@ private OffsetAndBucketProperty computeOffsetAndBucket( rowIdOffset += reader.getNumberOfRows(); } return new OffsetAndBucketProperty(rowIdOffset, bucketProperty, - syntheticTxnInfo.syntheticTransactionId); + syntheticTxnInfo.syntheticWriteId); } /** * {@link VectorizedOrcAcidRowBatchReader} is always used for vectorized reads of acid tables. 
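The hunks above and below this point all rely on the same reader-side check: the job conf now carries a serialized write id list under ValidWriteIdList.VALID_WRITEIDS_KEY (instead of ValidTxnList.VALID_TXNS_KEY), and a row is kept only if the write id that created it is valid for the reading query. A minimal sketch of that pattern, using only the types and calls that appear in this patch; the wrapper class and method names are illustrative, not part of the change:

    import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;
    import org.apache.hadoop.mapred.JobConf;

    /** Illustrative sketch of the visibility check used by the vectorized ACID reader. */
    class WriteIdVisibilitySketch {
      private final ValidWriteIdList validWriteIdList;

      WriteIdVisibilitySketch(JobConf conf) {
        // Same fallback as the reader: if no list was serialized into the conf,
        // treat every write id as valid.
        String idStr = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
        this.validWriteIdList = (idStr == null)
            ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(idStr);
      }

      /** A row survives only if the write id that produced it is visible to this reader. */
      boolean isVisible(long writeId) {
        return validWriteIdList.isWriteIdValid(writeId);
      }
    }

The same isWriteIdValid() test is applied below both to the CURRENT_WRITEID column of acid rows and to the synthetic write id assigned to pre-acid "original" files.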
@@ -426,7 +426,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti " to handle original files that require ROW__IDs: " + rootPath); } /** - * {@link RecordIdentifier#getTransactionId()} + * {@link RecordIdentifier#getWriteId()} */ recordIdColumnVector.fields[0].noNulls = true; recordIdColumnVector.fields[0].isRepeating = true; @@ -450,11 +450,11 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti } //Now populate a structure to use to apply delete events innerRecordIdColumnVector = new ColumnVector[OrcRecordUpdater.FIELDS]; - innerRecordIdColumnVector[OrcRecordUpdater.ORIGINAL_TRANSACTION] = recordIdColumnVector.fields[0]; + innerRecordIdColumnVector[OrcRecordUpdater.ORIGINAL_WRITEID] = recordIdColumnVector.fields[0]; innerRecordIdColumnVector[OrcRecordUpdater.BUCKET] = recordIdColumnVector.fields[1]; innerRecordIdColumnVector[OrcRecordUpdater.ROW_ID] = recordIdColumnVector.fields[2]; //these are insert events so (original txn == current) txn for all rows - innerRecordIdColumnVector[OrcRecordUpdater.CURRENT_TRANSACTION] = recordIdColumnVector.fields[0]; + innerRecordIdColumnVector[OrcRecordUpdater.CURRENT_WRITEID] = recordIdColumnVector.fields[0]; } if(syntheticProps.syntheticTxnId > 0) { //"originals" (written before table was converted to acid) is considered written by @@ -470,7 +470,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti * reader (transactions) is concerned. Since here we are reading 'original' schema file, * all rows in it have been created by the same txn, namely 'syntheticProps.syntheticTxnId' */ - if (!validTxnList.isTxnValid(syntheticProps.syntheticTxnId)) { + if (!validWriteIdList.isWriteIdValid(syntheticProps.syntheticTxnId)) { selectedBitSet.clear(0, vectorizedRowBatchBase.size); } } @@ -514,7 +514,7 @@ public boolean next(NullWritable key, VectorizedRowBatch value) throws IOExcepti // Transfer columnVector objects from base batch to outgoing batch. System.arraycopy(payloadStruct.fields, 0, value.cols, 0, value.getDataColumnCount()); if(rowIdProjected) { - recordIdColumnVector.fields[0] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]; + recordIdColumnVector.fields[0] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ORIGINAL_WRITEID]; recordIdColumnVector.fields[1] = vectorizedRowBatchBase.cols[OrcRecordUpdater.BUCKET]; recordIdColumnVector.fields[2] = vectorizedRowBatchBase.cols[OrcRecordUpdater.ROW_ID]; } @@ -531,24 +531,24 @@ private void findRecordsWithInvalidTransactionIds(VectorizedRowBatch batch, BitS } private void findRecordsWithInvalidTransactionIds(ColumnVector[] cols, int size, BitSet selectedBitSet) { - if (cols[OrcRecordUpdater.CURRENT_TRANSACTION].isRepeating) { + if (cols[OrcRecordUpdater.CURRENT_WRITEID].isRepeating) { // When we have repeating values, we can unset the whole bitset at once // if the repeating value is not a valid transaction. 
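// (Typical case: the whole batch comes from a delta written by a single write id,
// e.g. one that was aborted; vector[0] then speaks for every row, so a single
// isWriteIdValid() check can clear or keep the entire batch.)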
long currentTransactionIdForBatch = ((LongColumnVector) - cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector[0]; - if (!validTxnList.isTxnValid(currentTransactionIdForBatch)) { + cols[OrcRecordUpdater.CURRENT_WRITEID]).vector[0]; + if (!validWriteIdList.isWriteIdValid(currentTransactionIdForBatch)) { selectedBitSet.clear(0, size); } return; } long[] currentTransactionVector = - ((LongColumnVector) cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector; + ((LongColumnVector) cols[OrcRecordUpdater.CURRENT_WRITEID]).vector; // Loop through the bits that are set to true and mark those rows as false, if their // current transactions are not valid. for (int setBitIndex = selectedBitSet.nextSetBit(0); setBitIndex >= 0; setBitIndex = selectedBitSet.nextSetBit(setBitIndex+1)) { - if (!validTxnList.isTxnValid(currentTransactionVector[setBitIndex])) { + if (!validWriteIdList.isWriteIdValid(currentTransactionVector[setBitIndex])) { selectedBitSet.clear(setBitIndex); } } @@ -630,19 +630,19 @@ DeleteEventRegistry getDeleteEventRegistry() { private OrcRawRecordMerger.ReaderKey deleteRecordKey; private OrcStruct deleteRecordValue; private Boolean isDeleteRecordAvailable = null; - private ValidTxnList validTxnList; + private ValidWriteIdList validWriteIdList; SortMergedDeleteEventRegistry(JobConf conf, OrcSplit orcSplit, Reader.Options readerOptions) throws IOException { final Path[] deleteDeltas = getDeleteDeltaDirsFromSplit(orcSplit); if (deleteDeltas.length > 0) { int bucket = AcidUtils.parseBaseOrDeltaBucketFilename(orcSplit.getPath(), conf).getBucketId(); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); OrcRawRecordMerger.Options mergerOptions = new OrcRawRecordMerger.Options().isDeleteReader(true); assert !orcSplit.isOriginal() : "If this now supports Original splits, set up mergeOptions properly"; this.deleteRecords = new OrcRawRecordMerger(conf, true, null, false, bucket, - validTxnList, readerOptions, deleteDeltas, + validWriteIdList, readerOptions, deleteDeltas, mergerOptions); this.deleteRecordKey = new OrcRawRecordMerger.ReaderKey(); this.deleteRecordValue = this.deleteRecords.createValue(); @@ -671,8 +671,8 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit } long[] originalTransaction = - cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? null - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector; + cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? null + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector; long[] bucket = cols[OrcRecordUpdater.BUCKET].isRepeating ? null : ((LongColumnVector) cols[OrcRecordUpdater.BUCKET]).vector; @@ -682,7 +682,7 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit // The following repeatedX values will be set, if any of the columns are repeating. long repeatedOriginalTransaction = (originalTransaction != null) ? -1 - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[0]; + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[0]; long repeatedBucket = (bucket != null) ? -1 : ((LongColumnVector) cols[OrcRecordUpdater.BUCKET]).vector[0]; long repeatedRowId = (rowId != null) ? 
-1 @@ -828,12 +828,12 @@ public String toString() { private final RecordReader recordReader; private int indexPtrInBatch; private final int bucketForSplit; // The bucket value should be same for all the records. - private final ValidTxnList validTxnList; + private final ValidWriteIdList validWriteIdList; private boolean isBucketPropertyRepeating; private final boolean isBucketedTable; DeleteReaderValue(Reader deleteDeltaReader, Reader.Options readerOptions, int bucket, - ValidTxnList validTxnList, boolean isBucketedTable) throws IOException { + ValidWriteIdList validWriteIdList, boolean isBucketedTable) throws IOException { this.recordReader = deleteDeltaReader.rowsOptions(readerOptions); this.bucketForSplit = bucket; this.batch = deleteDeltaReader.getSchema().createRowBatch(); @@ -841,7 +841,7 @@ public String toString() { this.batch = null; // Oh! the first batch itself was null. Close the reader. } this.indexPtrInBatch = 0; - this.validTxnList = validTxnList; + this.validWriteIdList = validWriteIdList; this.isBucketedTable = isBucketedTable; checkBucketId();//check 1st batch } @@ -866,7 +866,7 @@ public boolean next(DeleteRecordKey deleteRecordKey) throws IOException { checkBucketId(deleteRecordKey.bucketProperty); } ++indexPtrInBatch; - if (validTxnList.isTxnValid(currentTransaction)) { + if (validWriteIdList.isWriteIdValid(currentTransaction)) { isValidNext = true; } } @@ -878,17 +878,17 @@ public void close() throws IOException { } private long setCurrentDeleteKey(DeleteRecordKey deleteRecordKey) { int originalTransactionIndex = - batch.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? 0 : indexPtrInBatch; + batch.cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? 0 : indexPtrInBatch; long originalTransaction = - ((LongColumnVector) batch.cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[originalTransactionIndex]; + ((LongColumnVector) batch.cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[originalTransactionIndex]; int bucketPropertyIndex = batch.cols[OrcRecordUpdater.BUCKET].isRepeating ? 0 : indexPtrInBatch; int bucketProperty = (int)((LongColumnVector)batch.cols[OrcRecordUpdater.BUCKET]).vector[bucketPropertyIndex]; long rowId = ((LongColumnVector) batch.cols[OrcRecordUpdater.ROW_ID]).vector[indexPtrInBatch]; int currentTransactionIndex = - batch.cols[OrcRecordUpdater.CURRENT_TRANSACTION].isRepeating ? 0 : indexPtrInBatch; + batch.cols[OrcRecordUpdater.CURRENT_WRITEID].isRepeating ? 0 : indexPtrInBatch; long currentTransaction = - ((LongColumnVector) batch.cols[OrcRecordUpdater.CURRENT_TRANSACTION]).vector[currentTransactionIndex]; + ((LongColumnVector) batch.cols[OrcRecordUpdater.CURRENT_WRITEID]).vector[currentTransactionIndex]; deleteRecordKey.set(originalTransaction, bucketProperty, rowId); return currentTransaction; } @@ -976,14 +976,14 @@ public int compareTo(CompressedOtid other) { private TreeMap sortMerger; private long rowIds[]; private CompressedOtid compressedOtids[]; - private ValidTxnList validTxnList; + private ValidWriteIdList validWriteIdList; private Boolean isEmpty = null; ColumnizedDeleteEventRegistry(JobConf conf, OrcSplit orcSplit, Reader.Options readerOptions) throws IOException, DeleteEventsOverflowMemoryException { int bucket = AcidUtils.parseBaseOrDeltaBucketFilename(orcSplit.getPath(), conf).getBucketId(); - String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); - this.validTxnList = (txnString == null) ? 
new ValidReadTxnList() : new ValidReadTxnList(txnString); + String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY); + this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); this.sortMerger = new TreeMap(); this.rowIds = null; this.compressedOtids = null; @@ -1025,7 +1025,7 @@ public int compareTo(CompressedOtid other) { throw new DeleteEventsOverflowMemoryException(); } DeleteReaderValue deleteReaderValue = new DeleteReaderValue(deleteDeltaReader, - readerOptions, bucket, validTxnList, isBucketedTable); + readerOptions, bucket, validWriteIdList, isBucketedTable); DeleteRecordKey deleteRecordKey = new DeleteRecordKey(); if (deleteReaderValue.next(deleteRecordKey)) { sortMerger.put(deleteRecordKey, deleteReaderValue); @@ -1165,10 +1165,10 @@ public void findDeletedRecords(ColumnVector[] cols, int size, BitSet selectedBit // check if it is deleted or not. long[] originalTransactionVector = - cols[OrcRecordUpdater.ORIGINAL_TRANSACTION].isRepeating ? null - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector; + cols[OrcRecordUpdater.ORIGINAL_WRITEID].isRepeating ? null + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector; long repeatedOriginalTransaction = (originalTransactionVector != null) ? -1 - : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_TRANSACTION]).vector[0]; + : ((LongColumnVector) cols[OrcRecordUpdater.ORIGINAL_WRITEID]).vector[0]; long[] bucketProperties = cols[OrcRecordUpdater.BUCKET].isRepeating ? null diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 3968b0e..d37e4e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -31,7 +31,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.LockRequestBuilder; @@ -50,6 +50,7 @@ Licensed to the Apache Software Foundation (ASF) under one import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -84,6 +85,12 @@ Licensed to the Apache Software Foundation (ASF) under one * transaction id. Thus is 1 is first transaction id. */ private volatile long txnId = 0; + + /** + * The local cache of table write IDs associated with current transaction + */ + private HashMap tableWriteIds = new HashMap<>(); + /** * assigns a unique monotonically increasing ID to each statement * which is part of an open transaction. 
This is used by storage @@ -91,7 +98,7 @@ Licensed to the Apache Software Foundation (ASF) under one * to keep apart multiple writes of the same data within the same transaction * Also see {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options} */ - private int writeId = -1; + private int stmtId = -1; /** * counts number of statements in the current transaction */ @@ -208,8 +215,9 @@ long openTxn(Context ctx, String user, long delay) throws LockException { } try { txnId = getMS().openTxn(user); - writeId = 0; + stmtId = 0; numStatements = 0; + tableWriteIds.clear(); isExplicitTransaction = false; startTransactionCount = 0; LOG.debug("Opened " + JavaUtils.txnIdToString(txnId)); @@ -241,7 +249,8 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo catch(LockException e) { if(e.getCause() instanceof TxnAbortedException) { txnId = 0; - writeId = -1; + stmtId = -1; + tableWriteIds.clear(); } throw e; } @@ -597,8 +606,9 @@ public void commitTxn() throws LockException { e); } finally { txnId = 0; - writeId = -1; + stmtId = -1; numStatements = 0; + tableWriteIds.clear(); } } @@ -622,8 +632,9 @@ public void rollbackTxn() throws LockException { e); } finally { txnId = 0; - writeId = -1; + stmtId = -1; numStatements = 0; + tableWriteIds.clear(); } } @@ -742,10 +753,12 @@ private void stopHeartbeat() throws LockException { } @Override - public ValidTxnList getValidTxns() throws LockException { + public ValidTxnWriteIdList getValidWriteIds(List tableList) throws LockException { init(); try { - return getMS().getValidTxns(txnId); + // TODO (Sankar): Need to get ValidTxnList first and store it in DbTxnManager. It is needed for + // multi-statement txns. Also, need to pass ValidTxnList to get getValidWriteIds. + return getMS().getValidWriteIds(txnId, tableList); } catch (TException e) { throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); @@ -886,9 +899,28 @@ public long getCurrentTxnId() { return txnId; } @Override - public int getWriteIdAndIncrement() { + public int getStmtIdAndIncrement() { assert isTxnOpen(); - return writeId++; + return stmtId++; + } + + @Override + public long getTableWriteId(String dbName, String tableName) throws LockException { + String fullTableName = AcidUtils.getFullTableName(dbName, tableName); + if (tableWriteIds.containsKey(fullTableName)) { + return tableWriteIds.get(fullTableName); + } + + try { + long writeId = 0; // If not called within a txn, then just return default Id of 0 + if (isTxnOpen()) { + writeId = getMS().allocateTableWriteId(txnId, dbName, tableName); + tableWriteIds.put(fullTableName, writeId); + } + return writeId; + } catch (TException e) { + throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); + } } private static long getHeartbeatInterval(Configuration conf) throws LockException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index cf8bc7f..aeb62cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.lockmgr; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.ValidTxnList; @@ -60,12 +61,15 @@ public boolean isTxnOpen() { public long getCurrentTxnId() { return 0L; } - @Override - public int getWriteIdAndIncrement() 
{ + public int getStmtIdAndIncrement() { return 0; } @Override + public long getTableWriteId(String dbName, String tableName) throws LockException { + return 0L; + } + @Override public HiveLockManager getLockManager() throws LockException { if (lockMgr == null) { boolean supportConcurrency = @@ -209,8 +213,8 @@ public void heartbeat() throws LockException { } @Override - public ValidTxnList getValidTxns() throws LockException { - return new ValidReadTxnList(); + public ValidTxnWriteIdList getValidWriteIds(List tableList) throws LockException { + return new ValidTxnWriteIdList(); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 4f9f0c2..04701ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hive.ql.lockmgr; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.QueryPlan; @@ -121,16 +121,17 @@ void heartbeat() throws LockException; /** - * Get the transactions that are currently valid. The resulting - * {@link ValidTxnList} object is a thrift object and can + * Get the table write Ids that are valid for the current transaction. The resulting + * {@link ValidTxnWriteIdList} object is a thrift object and can * be passed to the processing * tasks for use in the reading the data. This call should be made once up - * front by the planner and should never be called on the backend, + * front by the planner per table and should never be called on the backend, * as this will violate the isolation level semantics. - * @return list of valid transactions. + * @param tableList list of tables (.) read/written by current transaction. + * @return list of valid table write Ids. * @throws LockException */ - ValidTxnList getValidTxns() throws LockException; + ValidTxnWriteIdList getValidWriteIds(List tableList) throws LockException; /** * Get the name for currently installed transaction manager. @@ -222,9 +223,14 @@ long getCurrentTxnId(); /** + * if {@code isTxnOpen()}, returns the table write ID associated with current active transaction + */ + long getTableWriteId(String dbName, String tableName) throws LockException; + + /** * Should be though of more as a unique write operation ID in a given txn (at QueryPlan level). * Each statement writing data within a multi statement txn should have a unique WriteId. * Even a single statement, (e.g. Merge, multi-insert may generates several writes). */ - int getWriteIdAndIncrement(); + int getStmtIdAndIncrement(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index b1e05df..752974a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1970,7 +1970,7 @@ else if(!isAcidIUDoperation && isFullAcidTable) { * delta_x_x directory - same as any other Acid write. This method modifies the destPath to add * this path component. 
* @param txnId - id of current transaction (in which this operation is running) - * @param stmtId - see {@link DbTxnManager#getWriteIdAndIncrement()} + * @param stmtId - see {@link DbTxnManager#getStmtIdAndIncrement()} * @return appropriately modified path */ private Path fixFullAcidPathForLoadData(LoadFileType loadFileType, Path destPath, long txnId, int stmtId, Table tbl) throws HiveException { @@ -2232,13 +2232,13 @@ private void constructOneLBLocationMap(FileStatus fSta, * @param loadFileType * @param numDP number of dynamic partitions * @param isAcid true if this is an ACID operation - * @param txnId txnId, can be 0 unless isAcid == true + * @param writeId writeId, can be 0 unless isAcid == true * @return partition map details (PartitionSpec and Partition) * @throws HiveException */ public Map, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map partSpec, final LoadFileType loadFileType, - final int numDP, final int numLB, final boolean isAcid, final long txnId, final int stmtId, + final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException { @@ -2254,7 +2254,7 @@ private void constructOneLBLocationMap(FileStatus fSta, // Get all valid partition paths and existing partitions for them (if any) final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, txnId, stmtId, + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); final int partsToLoad = validPartitions.size(); @@ -2289,7 +2289,7 @@ public Void call() throws Exception { // load the partition Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, loadFileType, true, numLB > 0, - false, isAcid, hasFollowingStatsTask, txnId, stmtId); + false, isAcid, hasFollowingStatsTask, writeId, stmtId); partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { @@ -2348,8 +2348,9 @@ public Void call() throws Exception { for (Partition p : partitionsMap.values()) { partNames.add(p.getName()); } - getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), - partNames, AcidUtils.toDataOperationType(operation)); + getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), + tbl.getDbName(), tbl.getTableName(), partNames, + AcidUtils.toDataOperationType(operation)); } LOG.info("Loaded " + partitionsMap.size() + " partitions"); return partitionsMap; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 69447d9..2cca0fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1270,7 +1270,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, FileSinkDesc fsInputDesc = fsInput.getConf(); if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("Creating merge work from " + System.identityHashCode(fsInput) - + " with write ID " + (fsInputDesc.isMmTable() ? fsInputDesc.getTransactionId() : null) + + " with write ID " + (fsInputDesc.isMmTable() ? 
fsInputDesc.getTableWriteId() : null) + " into " + finalName); } @@ -1280,7 +1280,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)); RowSchema inputRS = fsInput.getSchema(); - Long srcMmWriteId = fsInputDesc.isMmTable() ? fsInputDesc.getTransactionId() : null; + Long srcMmWriteId = fsInputDesc.isMmTable() ? fsInputDesc.getTableWriteId() : null; FileSinkDesc fsOutputDesc = null; TableScanOperator tsMerge = null; if (!isBlockMerge) { @@ -1675,7 +1675,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, fmd = new OrcFileMergeDesc(); } fmd.setIsMmTable(fsInputDesc.isMmTable()); - fmd.setTxnId(fsInputDesc.getTransactionId()); + fmd.setWriteId(fsInputDesc.getTableWriteId()); int stmtId = fsInputDesc.getStatementId(); fmd.setStmtId(stmtId == -1 ? 0 : stmtId); fmd.setDpCtx(fsInputDesc.getDynPartCtx()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index d159e4b..516c8fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1592,7 +1592,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); truncateTblDesc.setOutputDir(queryTmpdir); LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<>() : partSpec, null); + partSpec == null ? new HashMap<>() : partSpec); ltd.setLbCtx(lbCtx); @SuppressWarnings("unchecked") Task moveTsk = @@ -2256,7 +2256,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, mergeDesc.setOutputDir(queryTmpdir); // No need to handle MM tables - unsupported path. LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<>() : partSpec, null); + partSpec == null ? 
new HashMap<>() : partSpec); ltd.setLbCtx(lbCtx); Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 54f5bab..ca06faa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -276,8 +277,13 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Long txnId = null; int stmtId = -1; if (AcidUtils.isTransactionalTable(ts.tableHandle)) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - stmtId = SessionState.get().getTxnMgr().getWriteIdAndIncrement(); + try { + txnId = SessionState.get().getTxnMgr().getTableWriteId(ts.tableHandle.getDbName(), + ts.tableHandle.getTableName()); + } catch (LockException ex) { + throw new SemanticException("Failed to allocate the write id", ex); + } + stmtId = SessionState.get().getTxnMgr().getStmtIdAndIncrement(); } LoadTableDesc loadTableWork; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index c2e2499..4db6e24 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -122,6 +122,8 @@ import org.apache.hadoop.hive.ql.lib.GraphWalker; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.DummyPartition; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -6653,6 +6655,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) Map partSpec = null; boolean isMmTable = false, isMmCtas = false; Long txnId = null; + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6732,11 +6735,15 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM checkAcidConstraints(qb, table_desc, dest_tab); } - if (isMmTable) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - } else { - txnId = acidOp == Operation.NOT_ACID ? null : - SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + if (isMmTable) { + txnId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } else { + txnId = acidOp == Operation.NOT_ACID ? 
null : + txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); } boolean isReplace = !qb.getParseInfo().isInsertIntoTable( dest_tab.getDbName(), dest_tab.getTableName()); @@ -6807,11 +6814,15 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) //todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM? checkAcidConstraints(qb, table_desc, dest_tab); } - if (isMmTable) { - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); - } else { - txnId = (acidOp == Operation.NOT_ACID) ? null : - SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + if (isMmTable) { + txnId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } else { + txnId = (acidOp == Operation.NOT_ACID) ? null : + txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName()); + } + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); } ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, txnId); // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old @@ -6851,7 +6862,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) destTableIsMaterialization = tblDesc.isMaterialization(); if (AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) { isMmTable = isMmCtas = true; - txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); + try { + txnId = txnMgr.getTableWriteId(tblDesc.getDatabaseName(), tblDesc.getTableName()); + } catch (LockException ex) { + throw new SemanticException("Failed to allocate write Id", ex); + } tblDesc.setInitialMmWriteId(txnId); } } else if (viewDesc != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java index 4da868c..6bd0053 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java @@ -28,7 +28,7 @@ private int listBucketingDepth; private boolean hasDynamicPartitions; private boolean isListBucketingAlterTableConcatenate; - private Long txnId; + private Long writeId; private int stmtId; private boolean isMmTable; @@ -77,12 +77,12 @@ public void setListBucketingAlterTableConcatenate(boolean isListBucketingAlterTa this.isListBucketingAlterTableConcatenate = isListBucketingAlterTableConcatenate; } - public Long getTxnId() { - return txnId; + public Long getWriteId() { + return writeId; } - public void setTxnId(Long txnId) { - this.txnId = txnId; + public void setWriteId(Long writeId) { + this.writeId = writeId; } public int getStmtId() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 92b8031..ce61fc5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -90,7 +90,7 @@ // Record what type of write this is. Default is non-ACID (ie old style). 
private AcidUtils.Operation writeType = AcidUtils.Operation.NOT_ACID; - private long txnId = 0; // transaction id for this operation + private long tableWriteId = 0; // table write id for this operation private int statementId = -1; private transient Table table; @@ -167,7 +167,7 @@ public Object clone() throws CloneNotSupportedException { ret.setStatsReliable(statsReliable); ret.setDpSortState(dpSortState); ret.setWriteType(writeType); - ret.setTransactionId(txnId); + ret.setTableWriteId(tableWriteId); ret.setStatementId(statementId); ret.setStatsTmpDir(statsTmpDir); ret.setIsMerge(isMerge); @@ -207,7 +207,7 @@ public Path getFinalDirName() { public Path getMergeInputDirName() { Path root = getFinalDirName(); if (isMmTable()) { - return new Path(root, AcidUtils.deltaSubdir(txnId, txnId, statementId)); + return new Path(root, AcidUtils.deltaSubdir(tableWriteId, tableWriteId, statementId)); } else { return root; } @@ -483,11 +483,11 @@ public void setWriteType(AcidUtils.Operation type) { public String getWriteTypeString() { return getWriteType() == AcidUtils.Operation.NOT_ACID ? null : getWriteType().toString(); } - public void setTransactionId(long id) { - txnId = id; + public void setTableWriteId(long id) { + tableWriteId = id; } - public long getTransactionId() { - return txnId; + public long getTableWriteId() { + return tableWriteId; } public void setStatementId(int id) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index a40c486..5cddc9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -120,9 +120,9 @@ public LoadTableDesc(final Path sourcePath, */ public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, - final Map partitionSpec, Long txnId) { + final Map partitionSpec) { this(sourcePath, table, partitionSpec, LoadFileType.REPLACE_ALL, - AcidUtils.Operation.NOT_ACID, txnId); + AcidUtils.Operation.NOT_ACID, null); } public LoadTableDesc(final Path sourcePath, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 661446d..61495e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -102,6 +102,9 @@ // input file name (big) to bucket number private Map bucketFileNameMapping; + private String dbName = null; + private String tableName = null; + private boolean isMetadataOnly = false; private boolean isAcidTable; @@ -135,6 +138,10 @@ public TableScanDesc(final String alias, List vcs, Table tblMetad this.alias = alias; this.virtualCols = vcs; this.tableMetadata = tblMetadata; + if (tblMetadata != null) { + dbName = tblMetadata.getDbName(); + tableName = tblMetadata.getTableName(); + } isAcidTable = AcidUtils.isAcidTable(this.tableMetadata); if (isAcidTable) { acidOperationalProperties = AcidUtils.getAcidOperationalProperties(this.tableMetadata); @@ -154,12 +161,12 @@ public String getAlias() { @Explain(displayName = "table", jsonOnly = true) public String getTableName() { - return this.tableMetadata.getTableName(); + return this.tableName; } @Explain(displayName = "database", jsonOnly = true) public String getDatabaseName() { - return this.tableMetadata.getDbName(); + return this.dbName; } @Explain(displayName = "columns", jsonOnly = true) diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 02097c8..df9fa00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -23,8 +23,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; @@ -243,10 +243,10 @@ private void clean(CompactionInfo ci) throws MetaException { /** * Each Compaction only compacts as far as the highest txn id such that all txns below it - * are resolved (i.e. not opened). This is what "highestTxnId" tracks. This is only tracked - * since Hive 1.3.0/2.0 - thus may be 0. See ValidCompactorTxnList and uses for more info. + * are resolved (i.e. not opened). This is what "highestWriteId" tracks. This is only tracked + * since Hive 1.3.0/2.0 - thus may be 0. See ValidCompactorWriteIdList and uses for more info. * - * We only want to clean up to the highestTxnId - otherwise we risk deleteing deltas from + * We only want to clean up to the highestWriteId - otherwise we risk deleteing deltas from * under an active reader. * * Suppose we have deltas D2 D3 for table T, i.e. the last compaction created D3 so now there is a @@ -255,10 +255,10 @@ private void clean(CompactionInfo ci) throws MetaException { * Between that check and removeFiles() a query starts (it will be reading D3) and another compaction * completes which creates D4. * Now removeFiles() (more specifically AcidUtils.getAcidState()) will declare D3 to be obsolete - * unless ValidTxnList is "capped" at highestTxnId. + * unless ValidTxnList is "capped" at highestWriteId. */ - final ValidTxnList txnList = ci.highestTxnId > 0 ? - new ValidReadTxnList(new long[0], new BitSet(), ci.highestTxnId) : new ValidReadTxnList(); + final ValidWriteIdList txnList = ci.highestWriteId > 0 ? 
+ new ValidReaderWriteIdList(ci.getFullTableName(), new long[0], new BitSet(), ci.highestWriteId) : new ValidReaderWriteIdList(); if (runJobAsSelf(ci.runAs)) { removeFiles(location, txnList); @@ -288,8 +288,8 @@ public Object run() throws Exception { } } - private void removeFiles(String location, ValidTxnList txnList) throws IOException { - AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, txnList); + private void removeFiles(String location, ValidWriteIdList writeIdList) throws IOException { + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, writeIdList); List obsoleteDirs = dir.getObsolete(); List filesToDelete = new ArrayList(obsoleteDirs.size()); for (FileStatus stat : obsoleteDirs) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 0e456df..b0c62f6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -34,8 +34,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StringableMap; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -109,7 +109,7 @@ public CompactorMR() { } private JobConf createBaseJobConf(HiveConf conf, String jobName, Table t, StorageDescriptor sd, - ValidTxnList txns, CompactionInfo ci) { + ValidWriteIdList writeIds, CompactionInfo ci) { JobConf job = new JobConf(conf); job.setJobName(jobName); job.setOutputKeyClass(NullWritable.class); @@ -134,7 +134,7 @@ private JobConf createBaseJobConf(HiveConf conf, String jobName, Table t, Storag job.setBoolean(IS_COMPRESSED, sd.isCompressed()); job.set(TABLE_PROPS, new StringableMap(t.getParameters()).toString()); job.setInt(NUM_BUCKETS, sd.getNumBuckets()); - job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString()); + job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString()); overrideMRProps(job, t.getParameters()); // override MR properties from tblproperties if applicable if (ci.properties != null) { overrideTblProps(job, t.getParameters(), ci.properties); @@ -196,12 +196,12 @@ private void overrideMRProps(JobConf job, Map properties) { * @param jobName name to run this job with * @param t metastore table * @param sd metastore storage descriptor - * @param txns list of valid transactions + * @param writeIds list of valid write ids * @param ci CompactionInfo * @throws java.io.IOException if the job fails */ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, - ValidTxnList txns, CompactionInfo ci, Worker.StatsUpdater su, TxnStore txnHandler) throws IOException { + ValidWriteIdList writeIds, CompactionInfo ci, Worker.StatsUpdater su, TxnStore txnHandler) throws IOException { if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION)) { throw new RuntimeException(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION.name() + "=true"); @@ -212,18 +212,18 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, if (AcidUtils.isInsertOnlyTable(t.getParameters())) { 
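// For insert-only (MM) tables there is nothing to merge; the only compaction work is
// dropping delta directories left behind by aborted write ids (see removeFiles() below).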
LOG.debug("Going to delete directories for aborted transactions for MM table " + t.getDbName() + "." + t.getTableName()); - removeFiles(conf, sd.getLocation(), txns, t); + removeFiles(conf, sd.getLocation(), writeIds, t); return; } - JobConf job = createBaseJobConf(conf, jobName, t, sd, txns, ci); + JobConf job = createBaseJobConf(conf, jobName, t, sd, writeIds, ci); // Figure out and encode what files we need to read. We do this here (rather than in // getSplits below) because as part of this we discover our minimum and maximum transactions, // and discovering that in getSplits is too late as we then have no way to pass it to our // mapper. - AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns, false, true); + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds, false, true); List parsedDeltas = dir.getCurrentDirectories(); int maxDeltastoHandle = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_MAX_NUM_DELTA); if(parsedDeltas.size() > maxDeltastoHandle) { @@ -241,14 +241,14 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, "runaway/mis-configured process writing to ACID tables, especially using Streaming Ingest API."); int numMinorCompactions = parsedDeltas.size() / maxDeltastoHandle; for(int jobSubId = 0; jobSubId < numMinorCompactions; jobSubId++) { - JobConf jobMinorCompact = createBaseJobConf(conf, jobName + "_" + jobSubId, t, sd, txns, ci); + JobConf jobMinorCompact = createBaseJobConf(conf, jobName + "_" + jobSubId, t, sd, writeIds, ci); launchCompactionJob(jobMinorCompact, null, CompactionType.MINOR, null, parsedDeltas.subList(jobSubId * maxDeltastoHandle, (jobSubId + 1) * maxDeltastoHandle), maxDeltastoHandle, -1, conf, txnHandler, ci.id, jobName); } //now recompute state since we've done minor compactions and have different 'best' set of deltas - dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns); + dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds); } StringableList dirsToSearch = new StringableList(); @@ -279,8 +279,8 @@ void run(HiveConf conf, String jobName, Table t, StorageDescriptor sd, if (parsedDeltas.size() == 0 && dir.getOriginalFiles().size() == 0) { // Skip compaction if there's no delta files AND there's no original files String minOpenInfo = "."; - if(txns.getMinOpenTxn() != null) { - minOpenInfo = " with min Open " + JavaUtils.txnIdToString(txns.getMinOpenTxn()) + + if(writeIds.getMinOpenWriteId() != null) { + minOpenInfo = " with min Open " + JavaUtils.writeIdToString(writeIds.getMinOpenWriteId()) + ". 
Compaction cannot compact above this txnid"; } LOG.error("No delta files or original files found to compact in " + sd.getLocation() + @@ -315,8 +315,8 @@ private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compa LOG.debug("Adding delta " + delta.getPath() + " to directories to search"); dirsToSearch.add(delta.getPath()); deltaDirs.add(delta.getPath()); - minTxn = Math.min(minTxn, delta.getMinTransaction()); - maxTxn = Math.max(maxTxn, delta.getMaxTransaction()); + minTxn = Math.min(minTxn, delta.getMinWriteId()); + maxTxn = Math.max(maxTxn, delta.getMaxWriteId()); } if (baseDir != null) job.set(BASE_DIR, baseDir.toString()); @@ -378,9 +378,9 @@ private void setColumnTypes(JobConf job, List cols) { } // Remove the directories for aborted transactions only - private void removeFiles(HiveConf conf, String location, ValidTxnList txnList, Table t) + private void removeFiles(HiveConf conf, String location, ValidWriteIdList writeIdList, Table t) throws IOException { - AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, txnList, + AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(location), conf, writeIdList, Ref.from(false), false, t.getParameters()); // For MM table, we only want to delete delta dirs for aborted txns. List abortedDirs = dir.getAbortedDirectories(); @@ -717,13 +717,13 @@ public void map(WritableComparable key, CompactorInputSplit split, @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidInputFormat aif = instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME)); - ValidTxnList txnList = - new ValidCompactorTxnList(jobConf.get(ValidTxnList.VALID_TXNS_KEY)); + ValidWriteIdList writeIdList = + new ValidCompactorWriteIdList(jobConf.get(ValidWriteIdList.VALID_WRITEIDS_KEY)); boolean isMajor = jobConf.getBoolean(IS_MAJOR, false); AcidInputFormat.RawReader reader = aif.getRawReader(jobConf, isMajor, split.getBucket(), - txnList, split.getBaseDir(), split.getDeltaDirs()); + writeIdList, split.getBaseDir(), split.getDeltaDirs()); RecordIdentifier identifier = reader.createKey(); V value = reader.createValue(); getWriter(reporter, reader.getObjectInspector(), split.getBucket()); @@ -778,8 +778,8 @@ private void getWriter(Reporter reporter, ObjectInspector inspector, .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) - .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) + .maximumWriteId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format @@ -803,8 +803,8 @@ private void getDeleteEventWriter(Reporter reporter, ObjectInspector inspector, .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) - .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) + .maximumWriteId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format @@ -925,8 +925,8 @@ public void commitJob(JobContext context) throws IOException { 
AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .writingBase(conf.getBoolean(IS_MAJOR, false)) .isCompressed(conf.getBoolean(IS_COMPRESSED, false)) - .minimumTransactionId(conf.getLong(MIN_TXN, Long.MAX_VALUE)) - .maximumTransactionId(conf.getLong(MAX_TXN, Long.MIN_VALUE)) + .minimumWriteId(conf.getLong(MIN_TXN, Long.MAX_VALUE)) + .maximumWriteId(conf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(0) .statementId(-1); Path newDeltaDir = AcidUtils.createFilename(finalLocation, options).getParent(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index a52e023..7881709 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -21,19 +21,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.CompactionRequest; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -46,6 +36,7 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -88,8 +79,6 @@ public void run() { startedAt = System.currentTimeMillis(); //todo: add method to only get current i.e. 
skip history - more efficient ShowCompactResponse currentCompactions = txnHandler.showCompact(new ShowCompactRequest()); - ValidTxnList txns = - TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); Set potentials = txnHandler.findPotentialCompactions(abortedThreshold); LOG.debug("Found " + potentials.size() + " potential compactions, " + "checking to see if we should compact any of them"); @@ -143,12 +132,20 @@ public void run() { ", assuming it has been dropped and moving on."); continue; } + + // Compaction doesn't work under a transaction and hence pass 0 for current txn Id + // The response will have one entry per table and hence we get only one OpenWriteIds + String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(0, Collections.singletonList(fullTableName)); + ValidWriteIdList writeIds = + TxnUtils.createValidCompactWriteIdList(txnHandler.getOpenWriteIds(rqst).getOpenWriteIds().get(0)); + StorageDescriptor sd = resolveStorageDescriptor(t, p); String runAs = findUserToRunAs(sd.getLocation(), t); /*Future thought: checkForCompaction will check a lot of file metadata and may be expensive. * Long term we should consider having a thread pool here and running checkForCompactionS * in parallel*/ - CompactionType compactionNeeded = checkForCompaction(ci, txns, sd, t.getParameters(), runAs); + CompactionType compactionNeeded = checkForCompaction(ci, writeIds, sd, t.getParameters(), runAs); if (compactionNeeded != null) requestCompaction(ci, runAs, compactionNeeded); } catch (Throwable t) { LOG.error("Caught exception while trying to determine if we should compact " + @@ -215,7 +212,7 @@ private boolean lookForCurrentCompactions(ShowCompactResponse compactions, } private CompactionType checkForCompaction(final CompactionInfo ci, - final ValidTxnList txns, + final ValidWriteIdList writeIds, final StorageDescriptor sd, final Map tblproperties, final String runAs) @@ -227,7 +224,7 @@ private CompactionType checkForCompaction(final CompactionInfo ci, return CompactionType.MAJOR; } if (runJobAsSelf(runAs)) { - return determineCompactionType(ci, txns, sd, tblproperties); + return determineCompactionType(ci, writeIds, sd, tblproperties); } else { LOG.info("Going to initiate as user " + runAs); UserGroupInformation ugi = UserGroupInformation.createProxyUser(runAs, @@ -235,7 +232,7 @@ private CompactionType checkForCompaction(final CompactionInfo ci, CompactionType compactionType = ugi.doAs(new PrivilegedExceptionAction() { @Override public CompactionType run() throws Exception { - return determineCompactionType(ci, txns, sd, tblproperties); + return determineCompactionType(ci, writeIds, sd, tblproperties); } }); try { @@ -248,7 +245,7 @@ public CompactionType run() throws Exception { } } - private CompactionType determineCompactionType(CompactionInfo ci, ValidTxnList txns, + private CompactionType determineCompactionType(CompactionInfo ci, ValidWriteIdList writeIds, StorageDescriptor sd, Map tblproperties) throws IOException, InterruptedException { @@ -259,7 +256,7 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidTxnList t boolean noBase = false; Path location = new Path(sd.getLocation()); FileSystem fs = location.getFileSystem(conf); - AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns, false, false); + AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, writeIds, false, false); Path base = dir.getBaseDirectory(); long baseSize = 0; FileStatus 
stat = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 4508e59..bf769cb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -18,16 +18,13 @@ package org.apache.hadoop.hive.ql.txn.compactor; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.mapred.JobConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.CommandNeedRetryException; @@ -138,10 +135,15 @@ public void run() { } final boolean isMajor = ci.isMajorCompaction(); - final ValidTxnList txns = - TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); - LOG.debug("ValidCompactTxnList: " + txns.writeToString()); - txnHandler.setCompactionHighestTxnId(ci, txns.getHighWatermark()); + + // Compaction doesn't work under a transaction and hence pass 0 for current txn Id + // The response will have one entry per table and hence we get only one OpenWriteIds + String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(0, Collections.singletonList(fullTableName)); + final ValidWriteIdList writeIds = + TxnUtils.createValidCompactWriteIdList(txnHandler.getOpenWriteIds(rqst).getOpenWriteIds().get(0)); + LOG.debug("ValidCompactWriteIdList: " + writeIds.writeToString()); + txnHandler.setCompactionHighestWriteId(ci, writeIds.getHighWatermark()); final StringBuilder jobName = new StringBuilder(name); jobName.append("-compactor-"); jobName.append(ci.getFullPartitionName()); @@ -164,14 +166,14 @@ public void run() { launchedJob = true; try { if (runJobAsSelf(runAs)) { - mr.run(conf, jobName.toString(), t, sd, txns, ci, su, txnHandler); + mr.run(conf, jobName.toString(), t, sd, writeIds, ci, su, txnHandler); } else { UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(), UserGroupInformation.getLoginUser()); ugi.doAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { - mr.run(conf, jobName.toString(), t, sd, txns, ci, su, txnHandler); + mr.run(conf, jobName.toString(), t, sd, writeIds, ci, su, txnHandler); return null; } }); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 2a1545f..9e8fcca 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -766,8 +766,8 @@ public void testNonAcidToAcidConversion01() throws Exception { Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/000001_0")); Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t5")); Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/000001_0_copy_1")); - 
Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":16,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/delta_0000016_0000016_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/delta_0000001_0000001_0000/bucket_00001")); //run Compaction runStatementOnDriver("alter table "+ TestTxnCommands2.Table.NONACIDORCTBL +" compact 'major'"); TestTxnCommands2.runWorker(hiveConf); @@ -778,13 +778,13 @@ public void testNonAcidToAcidConversion01() throws Exception { } Assert.assertEquals("", 4, rs.size()); Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t12")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nonacidorctbl/base_0000016/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nonacidorctbl/base_0000001/bucket_00000")); Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t1\t2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/base_0000016/bucket_00001")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nonacidorctbl/base_0000001/bucket_00001")); Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t5")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/base_0000016/bucket_00001")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":16,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/base_0000016/bucket_00001")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nonacidorctbl/base_0000001/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t17")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nonacidorctbl/base_0000001/bucket_00001")); //make sure they are the same before and after compaction } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index 048215a..28fd19d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -34,7 +34,8 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -366,14 +367,14 @@ public void testNonAcidToAcidConversion02() throws Exception { */ String[][] expected = { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t13", "bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000"}, - {"{\"transactionid\":22,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", 
"bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "bucket_00001"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":3}\t1\t4", "bucket_00001"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":2}\t1\t5", "bucket_00001"}, {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":4}\t1\t6", "bucket_00001"}, - {"{\"transactionid\":20,\"bucketid\":536936448,\"rowid\":0}\t1\t16", "bucket_00001"} + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t16", "bucket_00001"} }; Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size()); for(int i = 0; i < expected.length; i++) { @@ -758,11 +759,11 @@ public void testNonAcidToAcidConversion3() throws Exception { FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); if (numDelta == 1) { - Assert.assertEquals("delta_0000024_0000024_0000", status[i].getPath().getName()); + Assert.assertEquals("delta_0000001_0000001_0000", status[i].getPath().getName()); Assert.assertEquals(BUCKET_COUNT - 1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } else if (numDelta == 2) { - Assert.assertEquals("delta_0000025_0000025_0000", status[i].getPath().getName()); + Assert.assertEquals("delta_0000002_0000002_0000", status[i].getPath().getName()); Assert.assertEquals(1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -771,7 +772,7 @@ public void testNonAcidToAcidConversion3() throws Exception { FileStatus[] buckets = fs.listStatus(status[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); if (numDeleteDelta == 1) { - Assert.assertEquals("delete_delta_0000024_0000024_0000", status[i].getPath().getName()); + Assert.assertEquals("delete_delta_0000001_0000001_0000", status[i].getPath().getName()); Assert.assertEquals(BUCKET_COUNT - 1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -818,7 +819,7 @@ public void testNonAcidToAcidConversion3() throws Exception { Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } else if (numBase == 2) { // The new base dir now has two bucket files, since the delta dir has two bucket files - Assert.assertEquals("base_0000025", status[i].getPath().getName()); + Assert.assertEquals("base_0000002", status[i].getPath().getName()); Assert.assertEquals(1, buckets.length); Assert.assertEquals("bucket_00001", buckets[0].getPath().getName()); } @@ -844,7 +845,7 @@ public void testNonAcidToAcidConversion3() throws Exception { status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + (Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.HIDDEN_FILES_PATH_FILTER); Assert.assertEquals(1, status.length); - Assert.assertEquals("base_0000025", status[0].getPath().getName()); + Assert.assertEquals("base_0000002", status[0].getPath().getName()); FileStatus[] buckets = fs.listStatus(status[0].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(buckets); Assert.assertEquals(1, buckets.length); @@ -860,7 +861,7 @@ public void testNonAcidToAcidConversion3() throws Exception { public void testValidTxnsBookkeeping() throws Exception { // 1. 
Run a query against a non-ACID table, and we shouldn't have txn logged in conf runStatementOnDriver("select * from " + Table.NONACIDORCTBL); - String value = hiveConf.get(ValidTxnList.VALID_TXNS_KEY); + String value = hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); Assert.assertNull("The entry should be null for query that doesn't involve ACID tables", value); } @@ -873,9 +874,9 @@ public void testSimpleRead() throws Exception { //this will cause next txn to be marked aborted but the data is still written to disk hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData2)); - assert hiveConf.get(ValidTxnList.VALID_TXNS_KEY) == null : "previous txn should've cleaned it"; + assert hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null : "previous txn should've cleaned it"; //so now if HIVEFETCHTASKCONVERSION were to use a stale value, it would use a - //ValidTxnList with HWM=MAX_LONG, i.e. include the data for aborted txn + //ValidWriteIdList with HWM=MAX_LONG, i.e. include the data for aborted txn List rs = runStatementOnDriver("select * from " + Table.ACIDTBL); Assert.assertEquals("Extra data", 2, rs.size()); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java index 3a3272f..1f39c20 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java @@ -105,13 +105,13 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][]{ - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000020_0000020_0000/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("update T set b = 17 where a = 1"); String[][] expected2 = new String[][]{ - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":23,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000023_0000023_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000002_0000002_0000/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "update"); @@ -121,15 +121,15 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { - {"{\"transactionid\":23,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000020_0000027/bucket_00000"}, - {"{\"transactionid\":26,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000020_0000027/bucket_00000"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000001_0000004/bucket_00000"}, + 
{"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000001_0000004/bucket_00000"} }; checkResult(expected3, testQuery, isVectorized, "delete compact minor"); runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/1/data' overwrite into table T"); String[][] expected4 = new String[][]{ - {"{\"transactionid\":31,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000031/000000_0"}, - {"{\"transactionid\":31,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000031/000000_0"}}; + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000005/000000_0"}, + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000005/000000_0"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite"); //load same data again (additive) @@ -138,9 +138,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("delete from T where a = 3");//matches 2 rows runStatementOnDriver("insert into T values(2,2)"); String[][] expected5 = new String[][]{ - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000035_0000035_0000/bucket_00000"}, - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000035_0000035_0000/bucket_00000"}, - {"{\"transactionid\":37,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000037_0000037_0000/bucket_00000"} + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, + {"{\"transactionid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000009_0000009_0000/bucket_00000"} }; checkResult(expected5, testQuery, isVectorized, "load data inpath overwrite update"); @@ -148,9 +148,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected6 = new String[][]{ - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/base_0000037/bucket_00000"}, - {"{\"transactionid\":35,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000037/bucket_00000"}, - {"{\"transactionid\":37,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000037/bucket_00000"} + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/base_0000009/bucket_00000"}, + {"{\"transactionid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000009/bucket_00000"}, + {"{\"transactionid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000009/bucket_00000"} }; checkResult(expected6, testQuery, isVectorized, "load data inpath compact major"); } @@ -174,21 +174,21 @@ private void loadData(boolean isVectorized) throws Exception { "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { //normal insert - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000016_0000016_0000/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000016_0000016_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, //Load Data - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
"t/delta_0000021_0000021_0000/000000_0"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000021_0000021_0000/000000_0"}}; + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); //test minor compaction runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected1 = new String[][] { - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000016_0000021/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000016_0000021/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000002/bucket_00000"} }; checkResult(expected1, testQuery, isVectorized, "load data inpath (minor)"); @@ -197,11 +197,11 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000027/bucket_00000"}, - {"{\"transactionid\":27,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000027/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000003/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath (major)"); @@ -210,8 +210,8 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("export table Tstage to '" + getWarehouseDir() +"/2'"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/2/data' overwrite into table T"); String[][] expected3 = new String[][] { - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000033/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000033/000000_0"}}; + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000004/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", 
"t/base_0000004/000000_0"}}; checkResult(expected3, testQuery, isVectorized, "load data inpath overwrite"); //one more major compaction @@ -219,9 +219,9 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected4 = new String[][] { - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000036/bucket_00000"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000036/bucket_00000"}, - {"{\"transactionid\":36,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000036/bucket_00000"}}; + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000005/bucket_00000"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000005/bucket_00000"}, + {"{\"transactionid\":5,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000005/bucket_00000"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite (major)"); } /** @@ -254,20 +254,20 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti /* {"transactionid":0,"bucketid":536870912,"rowid":0} 0 2/000000_0 {"transactionid":0,"bucketid":536870912,"rowid":1} 0 4/000000_0 -{"transactionid":24,"bucketid":536870912,"rowid":0} 4 4/delta_0000024_0000024_0000/000000_0 -{"transactionid":24,"bucketid":536870912,"rowid":1} 5 5/delta_0000024_0000024_0000/000000_0 +{"transactionid":1,"bucketid":536870912,"rowid":0} 4 4/delta_0000001_0000001_0000/000000_0 +{"transactionid":1,"bucketid":536870912,"rowid":1} 5 5/delta_0000001_0000001_0000/000000_0 */ String[][] expected = new String[][] { //from pre-acid insert {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/000000_0"}, //from Load Data into acid converted table - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":0}\t2\t2", "t/delta_0000024_0000024_0000/000001_0"}, - {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":1}\t3\t3", "t/delta_0000024_0000024_0000/000001_0"}, - {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":0}\t4\t4", "t/delta_0000024_0000024_0000/000002_0"}, - {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":1}\t5\t5", "t/delta_0000024_0000024_0000/000002_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t2\t2", "t/delta_0000001_0000001_0000/000001_0"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t3\t3", "t/delta_0000001_0000001_0000/000001_0"}, + {"{\"transactionid\":1,\"bucketid\":537001984,\"rowid\":0}\t4\t4", "t/delta_0000001_0000001_0000/000002_0"}, + {"{\"transactionid\":1,\"bucketid\":537001984,\"rowid\":1}\t5\t5", "t/delta_0000001_0000001_0000/000002_0"}, }; checkResult(expected, testQuery, isVectorized, "load data inpath"); @@ -279,9 +279,9 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/2/data' overwrite into table T"); 
String[][] expected2 = new String[][] { - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000030/000000_0"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000030/000000_0"}, - {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000030/000001_0"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000002/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000002/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000002/000001_0"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath overwrite"); @@ -291,10 +291,10 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000033/bucket_00000"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000033/bucket_00000"}, - {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000033/bucket_00001"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t9\t9", "t/base_0000033/bucket_00000"} + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000003/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000003/bucket_00001"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t9\t9", "t/base_0000003/bucket_00000"} }; checkResult(expected3, testQuery, isVectorized, "load data inpath overwrite (major)"); @@ -326,12 +326,12 @@ public void loadDataPartitioned() throws Exception { List rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); String[][] expected = new String[][] { - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":28,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000028_0000028_0000/000000_0"}, - {"{\"transactionid\":28,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000028_0000028_0000/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000003_0000003_0000/000000_0"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000003_0000003_0000/000000_0"}}; checkExpected(rs, expected, "load data inpath partitioned"); @@ -340,10 +340,10 @@ public void loadDataPartitioned() throws 
Exception { runStatementOnDriver("truncate table Tstage"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/4/data' overwrite into table T partition(p=1)"); String[][] expected2 = new String[][] { - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000020_0000020_0000/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000033/000000_0"}, - {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000033/000000_0"}}; + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000004/000000_0"}, + {"{\"transactionid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000004/000000_0"}}; rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); checkExpected(rs, expected2, "load data inpath partitioned overwrite"); } @@ -405,20 +405,20 @@ private void testMultiStatement(boolean isVectorized) throws Exception { String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000019_0000019_0001/000000_0"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000019_0000019_0001/000000_0"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000001_0000001_0001/000000_0"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000001_0000001_0001/000000_0"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000019/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000019/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000001/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath 
(major)"); //at lest for now, Load Data w/Overwrite is not allowed in a txn: HIVE-18154 @@ -444,8 +444,8 @@ public void testAbort() throws Exception { String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000019_0000019_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index 3c6b6be..b93e775 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -89,14 +89,14 @@ public void testNoBuckets() throws Exception { /**the insert creates 2 output files (presumably because there are 2 input files) * The number in the file name is writerId. This is the number encoded in ROW__ID.bucketId - * see {@link org.apache.hadoop.hive.ql.io.BucketCodec}*/ - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); runStatementOnDriver("update nobuckets set c3 = 17 where c3 in(0,1)"); rs = runStatementOnDriver("select ROW__ID, c1, c2, c3, INPUT__FILE__NAME from nobuckets order by INPUT__FILE__NAME, ROW__ID"); @@ -104,22 +104,22 @@ public void 
testNoBuckets() throws Exception { for(String s : rs) { LOG.warn(s); } - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000019_0000019_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); //so update has 1 writer which creates bucket0 where both new rows land - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000021_0000021_0000/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000021_0000021_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); Set expectedFiles = new HashSet<>(); //both delete events land in a single bucket0. Each has a different ROW__ID.bucketId value (even writerId in it is different) - expectedFiles.add("ts/delete_delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("nobuckets/delta_0000019_0000019_0000/bucket_00000"); - expectedFiles.add("nobuckets/delta_0000019_0000019_0000/bucket_00001"); - expectedFiles.add("nobuckets/delta_0000021_0000021_0000/bucket_00000"); + expectedFiles.add("ts/delete_delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00000"); + expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("nobuckets/delta_0000002_0000002_0000/bucket_00000"); //check that we get the right files on disk assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); //todo: it would be nice to check the contents of the files... 
could use orc.FileDump - it has @@ -133,33 +133,33 @@ public void testNoBuckets() throws Exception { LOG.warn(s); } /* -├── base_0000021 +├── base_0000002 │   ├── bucket_00000 │   └── bucket_00001 -├── delete_delta_0000021_0000021_0000 +├── delete_delta_0000002_0000002_0000 │   └── bucket_00000 -├── delta_0000019_0000019_0000 +├── delta_0000001_0000001_0000 │   ├── bucket_00000 │   └── bucket_00001 -└── delta_0000021_0000021_0000 +└── delta_0000002_0000002_0000 └── bucket_00000 */ - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/base_0000021/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/base_0000021/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/base_0000002/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/base_0000002/bucket_00001")); expectedFiles.clear(); - expectedFiles.add("delete_delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("uckets/delta_0000019_0000019_0000/bucket_00000"); - expectedFiles.add("uckets/delta_0000019_0000019_0000/bucket_00001"); - expectedFiles.add("uckets/delta_0000021_0000021_0000/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000021/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000021/bucket_00001"); + expectedFiles.add("delete_delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("uckets/delta_0000001_0000001_0000/bucket_00000"); + expectedFiles.add("uckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("uckets/delta_0000002_0000002_0000/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000002/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000002/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); TestTxnCommands2.runCleaner(hiveConf); @@ -168,8 +168,8 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("Unexpected result after clean", stringifyValues(result), rs); expectedFiles.clear(); - expectedFiles.add("nobuckets/base_0000021/bucket_00000"); - expectedFiles.add("nobuckets/base_0000021/bucket_00001"); + 
expectedFiles.add("nobuckets/base_0000002/bucket_00000"); + expectedFiles.add("nobuckets/base_0000002/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); } @@ -185,8 +185,8 @@ public void testCTAS() throws Exception { "'='true', 'transactional_properties'='default') as select a, b from " + Table.NONACIDORCTBL); List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas order by ROW__ID"); String expected[][] = { - {"{\"transactionid\":14,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000014_0000014_0000/bucket_00000"}, - {"{\"transactionid\":14,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000014_0000014_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected, "Unexpected row count after ctas from non acid table"); @@ -195,8 +195,8 @@ public void testCTAS() throws Exception { "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem makeing table acid in listener is too late rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID"); String expected2[][] = { - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000017_0000017_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected2, "Unexpected row count after ctas from acid table"); @@ -205,10 +205,10 @@ public void testCTAS() throws Exception { " union all select a, b from " + Table.ACIDTBL); rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas3 order by ROW__ID"); String expected3[][] = { - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00001"}, - {"{\"transactionid\":19,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000019_0000019_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"}, + {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"}, }; checkExpected(rs, expected3, "Unexpected row count after ctas from union all query"); @@ -217,8 +217,8 @@ public void testCTAS() throws Exception { " union distinct select a, b from " + Table.ACIDTBL); 
rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas4 order by ROW__ID"); String expected4[][] = { - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000021_0000021_0000/bucket_00000"}, - {"{\"transactionid\":21,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000021_0000021_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0000/bucket_00000"}, }; checkExpected(rs, expected4, "Unexpected row count after ctas from union distinct query"); } @@ -268,11 +268,11 @@ public void testInsertToAcidWithUnionRemove() throws Exception { List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"); String expected[][] = { - {"{\"transactionid\":16,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000016_0000016_0001/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000016_0000016_0001/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000016_0000016_0002/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000016_0000016_0002/bucket_00000"}, - {"{\"transactionid\":16,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000016_0000016_0003/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0003/bucket_00000"}, }; checkExpected(rs, expected, "Unexpected row count after ctas"); } @@ -376,7 +376,7 @@ logical bucket (tranche) {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000024_0000024_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, }; rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by a, b, INPUT__FILE__NAME"); checkExpected(rs, expected3,"after converting to acid (no compaction with updates)"); @@ -388,15 +388,15 @@ logical bucket (tranche) /*Compaction preserves location of rows wrt buckets/tranches (for now)*/ String expected4[][] = { - {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000026/bucket_00002"}, - {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000026/bucket_00002"}, - {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/base_0000026/bucket_00001"}, - {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t9\t10", "warehouse/t/base_0000026/bucket_00001"}, - 
{"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t10\t20", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/base_0000026/bucket_00000"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000026/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000002/bucket_00002"}, + {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000002/bucket_00002"}, + {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/base_0000002/bucket_00001"}, + {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t9\t10", "warehouse/t/base_0000002/bucket_00001"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t10\t20", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t12\t12", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t20\t40", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t50\t60", "warehouse/t/base_0000002/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000002/bucket_00000"}, }; checkExpected(rs, expected4,"after major compact"); } @@ -414,14 +414,14 @@ public void testInsertFromUnion() throws Exception { } /* The number of writers seems to be based on number of MR jobs for the src query. 
todo check number of FileSinks - warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000000_0/delta_0000016_0000016_0000/bucket_00000 [length: 648] - {"operation":0,"originalTransaction":16,"bucket":536870912,"rowId":0,"currentTransaction":16,"row":{"_col0":1,"_col1":2}} - {"operation":0,"originalTransaction":16,"bucket":536870912,"rowId":1,"currentTransaction":16,"row":{"_col0":2,"_col1":4}} + warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000000_0/delta_0000001_0000001_0000/bucket_00000 [length: 648] + {"operation":0,"originalTransaction":1,"bucket":536870912,"rowId":0,"currentTransaction":1,"row":{"_col0":1,"_col1":2}} + {"operation":0,"originalTransaction":1,"bucket":536870912,"rowId":1,"currentTransaction":1,"row":{"_col0":2,"_col1":4}} ________________________________________________________________________________________________________________________ - warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000001_0/delta_0000016_0000016_0000/bucket_00001 [length: 658] - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":0,"currentTransaction":16,"row":{"_col0":5,"_col1":6}} - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":1,"currentTransaction":16,"row":{"_col0":6,"_col1":8}} - {"operation":0,"originalTransaction":16,"bucket":536936448,"rowId":2,"currentTransaction":16,"row":{"_col0":9,"_col1":10}} + warehouse/t/.hive-staging_hive_2017-09-13_08-59-28_141_6304543600372946004-1/-ext-10000/000001_0/delta_0000001_0000001_0000/bucket_00001 [length: 658] + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":0,"currentTransaction":1,"row":{"_col0":5,"_col1":6}} + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":1,"currentTransaction":1,"row":{"_col0":6,"_col1":8}} + {"operation":0,"originalTransaction":1,"bucket":536936448,"rowId":2,"currentTransaction":1,"row":{"_col0":9,"_col1":10}} */ rs = runStatementOnDriver("select a, b from T order by a, b"); Assert.assertEquals(stringifyValues(values), rs); @@ -469,14 +469,14 @@ public void testToAcidConversion02() throws Exception { */ String[][] expected = { {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t0\t13", "bucket_00000", "000000_0_copy_1"}, - {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"}, - {"{\"transactionid\":20,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"}, - {"{\"transactionid\":19,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "bucket_00000", "000000_0"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t1\t4", "bucket_00000", "000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t1\t5", "bucket_00000", "000000_0_copy_1"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":6}\t1\t6", "bucket_00000", "000000_0_copy_2"}, - {"{\"transactionid\":18,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"} 
}; Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size()); for(int i = 0; i < expected.length; i++) { @@ -491,17 +491,17 @@ public void testToAcidConversion02() throws Exception { ├── 000000_0 ├── 000000_0_copy_1 ├── 000000_0_copy_2 - ├── base_0000021 + ├── base_0000004 │   └── bucket_00000 - ├── delete_delta_0000019_0000019_0000 + ├── delete_delta_0000002_0000002_0000 │   └── bucket_00000 - ├── delete_delta_0000021_0000021_0000 + ├── delete_delta_0000004_0000004_0000 │   └── bucket_00000 - ├── delta_0000018_0000018_0000 + ├── delta_0000001_0000001_0000 │   └── bucket_00000 - ├── delta_0000019_0000019_0000 + ├── delta_0000002_0000002_0000 │   └── bucket_00000 - └── delta_0000020_0000020_0000 + └── delta_0000003_0000003_0000 └── bucket_00000 6 directories, 9 files @@ -606,7 +606,7 @@ public void testNonAcidToAcidVectorzied() throws Exception { query = "select ROW__ID, b from T where b > 0 order by a"; rs = runStatementOnDriver(query); String[][] expected4 = { - {"{\"transactionid\":25,\"bucketid\":536870912,\"rowid\":0}","17"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}","17"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}","4"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}","6"}, {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}","8"}, @@ -627,11 +627,11 @@ public void testNonAcidToAcidVectorzied() throws Exception { query = "select ROW__ID, a, b, INPUT__FILE__NAME from T where b > 0 order by a, b"; rs = runStatementOnDriver(query); String[][] expected5 = {//the row__ids are the same after compaction - {"{\"transactionid\":25,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8", "warehouse/t/base_0000025/bucket_00000"}, - {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10", "warehouse/t/base_0000025/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8", "warehouse/t/base_0000001/bucket_00000"}, + {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10", "warehouse/t/base_0000001/bucket_00000"} }; checkExpected(rs, expected5, "After major compaction"); //vectorized because there is INPUT__FILE__NAME @@ -671,14 +671,14 @@ public void testCompactStatsGather() throws Exception { String query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; List rs = runStatementOnDriver(query); String[][] expected = { - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", 
"t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000017_0000017_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); @@ -689,14 +689,14 @@ public void testCompactStatsGather() throws Exception { query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; rs = runStatementOnDriver(query); String[][] expected2 = { - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000017_0000017_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000017/bucket_00000"}, - {"{\"transactionid\":17,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000017/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000002/bucket_00000"}, + 
{"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000002/bucket_00000"}, + {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000002/bucket_00000"} }; checkExpected(rs, expected2, "after major compaction"); @@ -721,8 +721,8 @@ public void testDefault() throws Exception { List rs = runStatementOnDriver(query); String[][] expected = { //this proves data is written in Acid layout so T was made Acid - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000015_0000015_0000/bucket_00000"}, - {"{\"transactionid\":15,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000015_0000015_0000/bucket_00000"} + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index 2a520f4..461d142 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -292,8 +292,8 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, } desc.setWriteType(writeType); desc.setGatherStats(true); - if (txnId > 0) desc.setTransactionId(txnId); - if (writeType != AcidUtils.Operation.NOT_ACID) desc.setTransactionId(1L); + if (txnId > 0) desc.setTableWriteId(txnId); + if (writeType != AcidUtils.Operation.NOT_ACID) desc.setTableWriteId(1L); FileSinkOperator op = (FileSinkOperator)OperatorFactory.get( new CompilationOpContext(), FileSinkDesc.class); @@ -699,7 +699,7 @@ public float getProgress() throws IOException { public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path[] deltaDirectory) throws IOException { @@ -725,18 +725,18 @@ public RecordUpdater getRecordUpdater(final Path path, final Options options) th return new RecordUpdater() { @Override - public void insert(long currentTransaction, Object row) throws IOException { + public void insert(long currentWriteId, Object row) throws IOException { addRow(row); numRecordsAdded++; } @Override - public void update(long currentTransaction, Object row) throws IOException { + public void update(long currentWriteId, Object row) throws IOException { addRow(row); } @Override - public void delete(long currentTransaction, Object row) throws IOException { + public void delete(long currentWriteId, Object row) throws IOException { addRow(row); numRecordsAdded--; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java index 23dadd0..a1eb39d 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java @@ -49,8 +49,8 @@ public void testDeltaMetaDataReadFieldsNoStatementIds() throws Exception { deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(1)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().isEmpty(), is(true)); } @@ -63,8 +63,8 @@ public void testDeltaMetaDataReadFieldsWithStatementIds() throws Exception { deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(3)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().size(), is(2)); assertThat(deltaMetaData.getStmtIds().get(0), is(100)); assertThat(deltaMetaData.getStmtIds().get(1), is(101)); @@ -74,8 +74,8 @@ public void testDeltaMetaDataReadFieldsWithStatementIds() throws Exception { public void testDeltaMetaConstructWithState() throws Exception { DeltaMetaData deltaMetaData = new AcidInputFormat.DeltaMetaData(2000L, 2001L, Arrays.asList(97, 98, 99)); - assertThat(deltaMetaData.getMinTxnId(), is(2000L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2001L)); + assertThat(deltaMetaData.getMinWriteId(), is(2000L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2001L)); assertThat(deltaMetaData.getStmtIds().size(), is(3)); assertThat(deltaMetaData.getStmtIds().get(0), is(97)); assertThat(deltaMetaData.getStmtIds().get(1), is(98)); @@ -95,8 +95,8 @@ public void testDeltaMetaDataReadFieldsWithStatementIdsResetsState() throws Exce deltaMetaData.readFields(mockDataInput); verify(mockDataInput, times(3)).readInt(); - assertThat(deltaMetaData.getMinTxnId(), is(1L)); - assertThat(deltaMetaData.getMaxTxnId(), is(2L)); + assertThat(deltaMetaData.getMinWriteId(), is(1L)); + assertThat(deltaMetaData.getMaxWriteId(), is(2L)); assertThat(deltaMetaData.getStmtIds().size(), is(2)); assertThat(deltaMetaData.getStmtIds().get(0), is(100)); assertThat(deltaMetaData.getStmtIds().get(1), is(101)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java index 8945fdf..437980b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java @@ -27,13 +27,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; -import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockPath; @@ 
-56,8 +55,8 @@ public void testCreateFilename() throws Exception { assertEquals("/tmp/000123_0", AcidUtils.createFilename(p, options).toString()); options.bucket(23) - .minimumTransactionId(100) - .maximumTransactionId(200) + .minimumWriteId(100) + .maximumWriteId(200) .writingBase(true) .setOldStyle(false); assertEquals("/tmp/base_0000200/bucket_00023", @@ -92,8 +91,8 @@ public void testCreateFilenameLargeIds() throws Exception { assertEquals("/tmp/123456789_0", AcidUtils.createFilename(p, options).toString()); options.bucket(23) - .minimumTransactionId(1234567880) - .maximumTransactionId(1234567890) + .minimumWriteId(1234567880) + .maximumWriteId(1234567890) .writingBase(true) .setOldStyle(false); assertEquals("/tmp/base_1234567890/bucket_00023", @@ -118,29 +117,29 @@ public void testParsing() throws Exception { conf); assertEquals(false, opts.getOldStyle()); assertEquals(true, opts.isWritingBase()); - assertEquals(567, opts.getMaximumTransactionId()); - assertEquals(0, opts.getMinimumTransactionId()); + assertEquals(567, opts.getMaximumWriteId()); + assertEquals(0, opts.getMinimumWriteId()); assertEquals(123, opts.getBucketId()); opts = AcidUtils.parseBaseOrDeltaBucketFilename( new MockPath(fs, dir + "/delta_000005_000006/bucket_00001"), conf); assertEquals(false, opts.getOldStyle()); assertEquals(false, opts.isWritingBase()); - assertEquals(6, opts.getMaximumTransactionId()); - assertEquals(5, opts.getMinimumTransactionId()); + assertEquals(6, opts.getMaximumWriteId()); + assertEquals(5, opts.getMinimumWriteId()); assertEquals(1, opts.getBucketId()); opts = AcidUtils.parseBaseOrDeltaBucketFilename( new MockPath(fs, dir + "/delete_delta_000005_000006/bucket_00001"), conf); assertEquals(false, opts.getOldStyle()); assertEquals(false, opts.isWritingBase()); - assertEquals(6, opts.getMaximumTransactionId()); - assertEquals(5, opts.getMinimumTransactionId()); + assertEquals(6, opts.getMaximumWriteId()); + assertEquals(5, opts.getMinimumWriteId()); assertEquals(1, opts.getBucketId()); opts = AcidUtils.parseBaseOrDeltaBucketFilename(new Path(dir, "000123_0"), conf); assertEquals(true, opts.getOldStyle()); assertEquals(true, opts.isWritingBase()); assertEquals(123, opts.getBucketId()); - assertEquals(0, opts.getMinimumTransactionId()); - assertEquals(0, opts.getMaximumTransactionId()); + assertEquals(0, opts.getMinimumWriteId()); + assertEquals(0, opts.getMaximumWriteId()); } @@ -160,7 +159,7 @@ public void testOriginal() throws Exception { new MockFile("mock:/tbl/part1/subdir/000000_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, "/tbl/part1"), conf, - new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); assertEquals(0, dir.getCurrentDirectories().size()); assertEquals(0, dir.getObsolete().size()); @@ -195,7 +194,7 @@ public void testOriginalDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_101_101/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); List obsolete = dir.getObsolete(); assertEquals(2, obsolete.size()); @@ -215,12 +214,12 @@ public void testOriginalDeltas() throws Exception { assertEquals(2, deltas.size()); AcidUtils.ParsedDelta delt = deltas.get(0); 
assertEquals("mock:/tbl/part1/delta_025_030", delt.getPath().toString()); - assertEquals(25, delt.getMinTransaction()); - assertEquals(30, delt.getMaxTransaction()); + assertEquals(25, delt.getMinWriteId()); + assertEquals(30, delt.getMaxWriteId()); delt = deltas.get(1); assertEquals("mock:/tbl/part1/delta_050_100", delt.getPath().toString()); - assertEquals(50, delt.getMinTransaction()); - assertEquals(100, delt.getMaxTransaction()); + assertEquals(50, delt.getMinWriteId()); + assertEquals(100, delt.getMaxWriteId()); } @Test @@ -237,7 +236,7 @@ public void testBaseDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_90_120/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(5, obsolete.size()); @@ -251,8 +250,8 @@ public void testBaseDeltas() throws Exception { assertEquals(1, deltas.size()); AcidUtils.ParsedDelta delt = deltas.get(0); assertEquals("mock:/tbl/part1/delta_050_105", delt.getPath().toString()); - assertEquals(50, delt.getMinTransaction()); - assertEquals(105, delt.getMaxTransaction()); + assertEquals(50, delt.getMinWriteId()); + assertEquals(105, delt.getMaxWriteId()); } @Test @@ -265,7 +264,7 @@ public void testObsoleteOriginals() throws Exception { new MockFile("mock:/tbl/part1/000001_1", 500, new byte[0])); Path part = new MockPath(fs, "/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("150:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:150:" + Long.MAX_VALUE + ":")); // Obsolete list should include the two original bucket files, and the old base dir List obsolete = dir.getObsolete(); assertEquals(3, obsolete.size()); @@ -286,7 +285,7 @@ public void testOverlapingDelta() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(2, obsolete.size()); @@ -321,7 +320,7 @@ public void testOverlapingDelta2() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(5, obsolete.size()); @@ -346,7 +345,7 @@ public void deltasWithOpenTxnInRead() throws Exception { new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]), new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new 
ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -367,7 +366,7 @@ public void deltasWithOpenTxnInRead2() throws Exception { new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]), new MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -382,7 +381,7 @@ public void deltasWithOpenTxnsNotInCompact() throws Exception { new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("4:" + Long.MAX_VALUE)); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); assertEquals(1, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -399,7 +398,7 @@ public void deltasWithOpenTxnsNotInCompact2() throws Exception { new MockFile("mock:/tbl/part1/delta_6_10/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("3:" + Long.MAX_VALUE)); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:3:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); assertEquals(1, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -424,7 +423,7 @@ public void testBaseWithDeleteDeltas() throws Exception { new MockFile("mock:/tbl/part1/delete_delta_110_110/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = AcidUtils.getAcidState(new MockPath(fs, - "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + "mock:/tbl/part1"), conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(7, obsolete.size()); @@ -461,7 +460,7 @@ public void testOverlapingDeltaAndDeleteDelta() throws Exception { new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); assertEquals(3, obsolete.size()); @@ -490,7 +489,7 @@ public void testMinorCompactedDeltaMakesInBetweenDelteDeltaObsolete() throws Exc new MockFile("mock:/tbl/part1/delete_delta_50_50/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); List obsolete = dir.getObsolete(); 
assertEquals(1, obsolete.size()); assertEquals("mock:/tbl/part1/delete_delta_50_50", obsolete.get(0).getPath().toString()); @@ -517,7 +516,7 @@ public void deltasAndDeleteDeltasWithOpenTxnsNotInCompact() throws Exception { new MockFile("mock:/tbl/part1/delta_6_10/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = - AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("4:" + Long.MAX_VALUE + ":")); + AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE + ":")); List delts = dir.getCurrentDirectories(); assertEquals(2, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); @@ -538,7 +537,7 @@ public void deleteDeltasWithOpenTxnInRead() throws Exception { new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]), new MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0])); Path part = new MockPath(fs, "mock:/tbl/part1"); - AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4:4")); + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); assertEquals(3, delts.size()); assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 92f005d..8c7e79b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -48,7 +48,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -2267,7 +2267,7 @@ public void testVectorizationWithAcid() throws Exception { // write the orc file to the mock file system Path partDir = new Path(conf.get("mapred.input.dir")); OrcRecordUpdater writer = new OrcRecordUpdater(partDir, - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir)); for (int i = 0; i < 100; ++i) { BigRow row = new BigRow(i); @@ -2424,7 +2424,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { // write a base file in partition 0 OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0], - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir[0])); for(int i=0; i < 10; ++i) { writer.insert(10, new MyRow(i, 2 * i)); @@ -2437,7 +2437,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { // write a delta file in partition 0 writer = new OrcRecordUpdater(partDir[0], - new AcidOutputFormat.Options(conf).maximumTransactionId(10) + new AcidOutputFormat.Options(conf).maximumWriteId(10) .writingBase(true).bucket(1).inspector(inspector).finalDestination(partDir[0])); for(int i=10; i < 20; ++i) { writer.insert(10, new MyRow(i, 2*i)); @@ -3558,12 +3558,12 @@ public void 
testACIDReaderNoFooterSerializeWithDeltas() throws Exception { } writer.close(); - AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumTransactionId(1) - .maximumTransactionId(1).inspector(inspector).finalDestination(mockPath); + AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumWriteId(1) + .maximumWriteId(1).inspector(inspector).finalDestination(mockPath); OrcOutputFormat of = new OrcOutputFormat(); RecordUpdater ru = of.getRecordUpdater(mockPath, options); for (int i = 0; i < 10; ++i) { - ru.insert(options.getMinimumTransactionId(), new MyRow(i, 2 * i)); + ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i)); } ru.close(false);//this deletes the side file @@ -3637,12 +3637,12 @@ public void testACIDReaderFooterSerializeWithDeltas() throws Exception { } writer.close(); - AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumTransactionId(1) - .maximumTransactionId(1).inspector(inspector).finalDestination(mockPath); + AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).bucket(1).minimumWriteId(1) + .maximumWriteId(1).inspector(inspector).finalDestination(mockPath); OrcOutputFormat of = new OrcOutputFormat(); RecordUpdater ru = of.getRecordUpdater(mockPath, options); for (int i = 0; i < 10; ++i) { - ru.insert(options.getMinimumTransactionId(), new MyRow(i, 2 * i)); + ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i)); } ru.close(false);//this deletes the side file @@ -3894,7 +3894,7 @@ public void testColumnProjectionWithAcid() throws Exception { long fileLength = fs.getFileStatus(testFilePath).getLen(); // test with same schema with include - conf.set(ValidTxnList.VALID_TXNS_KEY, "100:99:"); + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:100:99:"); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "a,b,d"); conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "int,struct,string"); conf.set(ColumnProjectionUtils.READ_ALL_COLUMNS, "false"); @@ -3911,7 +3911,7 @@ public void testColumnProjectionWithAcid() throws Exception { while (reader.next(id, struct)) { assertEquals("id " + record, record, id.getRowId()); assertEquals("bucket " + record, 0, id.getBucketProperty()); - assertEquals("trans " + record, 1, id.getTransactionId()); + assertEquals("writeid " + record, 1, id.getWriteId()); assertEquals("a " + record, 42 * record, ((IntWritable) struct.getFieldValue(0)).get()); assertEquals(null, struct.getFieldValue(1)); @@ -3938,7 +3938,7 @@ public void testColumnProjectionWithAcid() throws Exception { while (reader.next(id, struct)) { assertEquals("id " + record, record, id.getRowId()); assertEquals("bucket " + record, 0, id.getBucketProperty()); - assertEquals("trans " + record, 1, id.getTransactionId()); + assertEquals("writeid " + record, 1, id.getWriteId()); assertEquals("a " + record, 42 * record, ((IntWritable) struct.getFieldValue(0)).get()); assertEquals(null, struct.getFieldValue(1)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java index c6a866a..3f04ee0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java @@ -35,8 +35,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; -import 
org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -121,11 +121,11 @@ private static void setRow(OrcStruct event, long currentTransaction, String value) { event.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation)); - event.setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(originalTransaction)); event.setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(bucket)); event.setFieldValue(OrcRecordUpdater.ROW_ID, new LongWritable(rowId)); - event.setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(currentTransaction)); OrcStruct row = new OrcStruct(1); row.setFieldValue(0, new Text(value)); @@ -195,17 +195,17 @@ public void testReaderPair() throws Exception { ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, minKey, maxKey, new Reader.Options(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(40, key.getRowId()); - assertEquals(120, key.getCurrentTransactionId()); + assertEquals(120, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); + assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); assertEquals(60, key.getRowId()); - assertEquals(130, key.getCurrentTransactionId()); + assertEquals(130, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -221,38 +221,38 @@ public void testReaderPairNoMin() throws Exception { ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, null, null, new Reader.Options(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(20, key.getRowId()); - assertEquals(100, key.getCurrentTransactionId()); + assertEquals(100, key.getCurrentWriteId()); assertEquals("first", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(30, key.getRowId()); - assertEquals(110, key.getCurrentTransactionId()); + assertEquals(110, key.getCurrentWriteId()); assertEquals("second", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(10, key.getTransactionId()); + assertEquals(10, key.getWriteId()); assertEquals(20, key.getBucketProperty()); assertEquals(40, key.getRowId()); - assertEquals(120, key.getCurrentTransactionId()); + assertEquals(120, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); + assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); assertEquals(60, key.getRowId()); - assertEquals(130, key.getCurrentTransactionId()); + assertEquals(130, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(40, key.getTransactionId()); 
+ assertEquals(40, key.getWriteId()); assertEquals(50, key.getBucketProperty()); assertEquals(61, key.getRowId()); - assertEquals(140, key.getCurrentTransactionId()); + assertEquals(140, key.getCurrentWriteId()); assertEquals("fifth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -303,19 +303,19 @@ public void testOriginalReaderPair() throws Exception { fs.makeQualified(root); fs.create(root); ReaderPair pair = new OrcRawRecordMerger.OriginalReaderPairToRead(key, reader, BUCKET, minKey, maxKey, - new Reader.Options().include(includes), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReadTxnList(), 0); + new Reader.Options().include(includes), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReaderWriteIdList(), 0); RecordReader recordReader = pair.getRecordReader(); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(2, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); assertEquals("third", value(pair.nextRecord())); pair.next(pair.nextRecord()); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(3, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); assertEquals("fourth", value(pair.nextRecord())); pair.next(pair.nextRecord()); @@ -323,8 +323,8 @@ public void testOriginalReaderPair() throws Exception { Mockito.verify(recordReader).close(); } - private static ValidTxnList createMaximalTxnList() { - return new ValidReadTxnList(); + private static ValidWriteIdList createMaximalTxnList() { + return new ValidReaderWriteIdList(); } @Test @@ -339,40 +339,40 @@ public void testOriginalReaderPairNoMin() throws Exception { fs.makeQualified(root); fs.create(root); ReaderPair pair = new OrcRawRecordMerger.OriginalReaderPairToRead(key, reader, BUCKET, null, null, - new Reader.Options(), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReadTxnList(), 0); + new Reader.Options(), new OrcRawRecordMerger.Options().rootPath(root), conf, new ValidReaderWriteIdList(), 0); assertEquals("first", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(0, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("second", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(1, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("third", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(2, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("fourth", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(3, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + 
assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals("fifth", value(pair.nextRecord())); - assertEquals(0, key.getTransactionId()); + assertEquals(0, key.getWriteId()); assertEquals(bucketProperty, key.getBucketProperty()); assertEquals(4, key.getRowId()); - assertEquals(0, key.getCurrentTransactionId()); + assertEquals(0, key.getCurrentWriteId()); pair.next(pair.nextRecord()); assertEquals(null, pair.nextRecord()); @@ -452,13 +452,13 @@ public void testNewBase() throws Exception { OrcStruct event = merger.createValue(); assertEquals(true, merger.next(id, event)); - assertEquals(10, id.getTransactionId()); + assertEquals(10, id.getWriteId()); assertEquals(20, id.getBucketProperty()); assertEquals(40, id.getRowId()); assertEquals("third", getValue(event)); assertEquals(true, merger.next(id, event)); - assertEquals(40, id.getTransactionId()); + assertEquals(40, id.getWriteId()); assertEquals(50, id.getBucketProperty()); assertEquals(60, id.getRowId()); assertEquals("fourth", getValue(event)); @@ -477,9 +477,9 @@ public void testNewBase() throws Exception { assertEquals("operation", fields.get(OrcRecordUpdater.OPERATION).getFieldName()); assertEquals("currentTransaction", - fields.get(OrcRecordUpdater.CURRENT_TRANSACTION).getFieldName()); + fields.get(OrcRecordUpdater.CURRENT_WRITEID).getFieldName()); assertEquals("originalTransaction", - fields.get(OrcRecordUpdater.ORIGINAL_TRANSACTION).getFieldName()); + fields.get(OrcRecordUpdater.ORIGINAL_WRITEID).getFieldName()); assertEquals("bucket", fields.get(OrcRecordUpdater.BUCKET).getFieldName()); assertEquals("rowId", @@ -538,15 +538,15 @@ public void testGetLogicalLength() throws Exception { } /*create delta_1_1_0/bucket0 with 1 row and close the file*/ AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .inspector(inspector).bucket(BUCKET).writingBase(false).minimumTransactionId(1) - .maximumTransactionId(1).finalDestination(root); + .inspector(inspector).bucket(BUCKET).writingBase(false).minimumWriteId(1) + .maximumWriteId(1).finalDestination(root); Path delta1_1_0 = new Path(root, AcidUtils.deltaSubdir( - options.getMinimumTransactionId(), options.getMaximumTransactionId(), options.getStatementId())); + options.getMinimumWriteId(), options.getMaximumWriteId(), options.getStatementId())); Path bucket0 = AcidUtils.createBucketFile(delta1_1_0, BUCKET); Path bucket0SideFile = OrcAcidUtils.getSideFile(bucket0); RecordUpdater ru = of.getRecordUpdater(root, options); - ru.insert(options.getMaximumTransactionId(), new MyRow("first")); + ru.insert(options.getMaximumWriteId(), new MyRow("first")); ru.close(false); FileStatus bucket0File = fs.getFileStatus(bucket0); @@ -581,7 +581,7 @@ public void testEmpty() throws Exception { // write the empty base AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .inspector(inspector).bucket(BUCKET).writingBase(true) - .maximumTransactionId(100).finalDestination(root); + .maximumWriteId(100).finalDestination(root); of.getRecordUpdater(root, options).close(false); { /*OrcRecordUpdater is inconsistent about when it creates empty files and when it does not. 
@@ -593,8 +593,8 @@ public void testEmpty() throws Exception { AcidUtils.baseDir(100)), BUCKET), wo); w.close(); } - ValidTxnList txnList = new ValidReadTxnList("200:" + Long.MAX_VALUE); - AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testEmpty:200:" + Long.MAX_VALUE); + AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, writeIdList); Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(), BUCKET); @@ -646,7 +646,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { options.statementId(-1); } RecordUpdater ru = of.getRecordUpdater(root, - options.writingBase(true).maximumTransactionId(100)); + options.writingBase(true).maximumWriteId(100)); for(String v: values) { ru.insert(0, new MyRow(v)); } @@ -654,7 +654,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { // write a delta ru = of.getRecordUpdater(root, options.writingBase(false) - .minimumTransactionId(200).maximumTransactionId(200).recordIdColumn(1)); + .minimumWriteId(200).maximumWriteId(200).recordIdColumn(1)); ru.update(200, new MyRow("update 1", 0, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 2", 2, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 3", 3, 0, BUCKET_PROPERTY)); @@ -662,8 +662,8 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { ru.delete(200, new MyRow("", 8, 0, BUCKET_PROPERTY)); ru.close(false); - ValidTxnList txnList = new ValidReadTxnList("200:" + Long.MAX_VALUE); - AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testNewBaseAndDelta:200:" + Long.MAX_VALUE); + AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, writeIdList); assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory()); assertEquals(new Path(root, use130Format ? 
@@ -978,13 +978,13 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { merger.close(); // try ignoring the 200 transaction and make sure it works still - ValidTxnList txns = new ValidReadTxnList("2000:200:200"); + ValidWriteIdList writeIds = new ValidReaderWriteIdList("testNewBaseAndDelta:2000:200:200"); //again 1st split is for base/ baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf)); merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, - txns, new Reader.Options(), + writeIds, new Reader.Options(), new Path[] {deleteDeltaDir}, new OrcRawRecordMerger.Options().isCompacting(false)); assertEquals(null, merger.getMinKey()); @@ -1006,7 +1006,7 @@ private void testNewBaseAndDelta(boolean use130Format) throws Exception { OrcFile.readerOptions(conf)); merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, - txns, new Reader.Options(), + writeIds, new Reader.Options(), new Path[] {deleteDeltaDir}, new OrcRawRecordMerger.Options().isCompacting(false)); assertEquals(null, merger.getMinKey()); @@ -1101,7 +1101,7 @@ public synchronized void addedRow(int rows) throws IOException { // write a delta AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + .writingBase(false).minimumWriteId(1).maximumWriteId(1) .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5) .finalDestination(root); @@ -1119,7 +1119,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false);//this doesn't create a key index presumably because writerOptions are not set on 'options' // write a delta - options = options.minimumTransactionId(100).maximumTransactionId(100); + options = options.minimumWriteId(100).maximumWriteId(100); ru = of.getRecordUpdater(root, options); values = new String[]{null, null, "1.0", null, null, null, null, "3.1"}; for(int i=0; i < values.length - 1; ++i) { @@ -1221,7 +1221,7 @@ public synchronized void addedRow(int rows) throws IOException { // make 5 stripes with 2 rows each OrcRecordUpdater.OrcOptions options = (OrcRecordUpdater.OrcOptions) new OrcRecordUpdater.OrcOptions(conf) - .writingBase(true).minimumTransactionId(0).maximumTransactionId(0) + .writingBase(true).minimumWriteId(0).maximumWriteId(0) .bucket(BUCKET).inspector(inspector).filesystem(fs); final int BUCKET_PROPERTY = BucketCodec.V1.encode(options); @@ -1239,7 +1239,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false); // write a delta - options.writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + options.writingBase(false).minimumWriteId(1).maximumWriteId(1) .recordIdColumn(5); ru = of.getRecordUpdater(root, options); values = new String[]{"0.0", null, null, "1.1", null, null, null, @@ -1253,7 +1253,7 @@ public synchronized void addedRow(int rows) throws IOException { ru.close(false); // write a delta - options.minimumTransactionId(100).maximumTransactionId(100); + options.minimumWriteId(100).maximumWriteId(100); ru = of.getRecordUpdater(root, options); values = new String[]{null, null, "1.0", null, null, null, null, "3.1"}; for(int i=0; i < values.length - 1; ++i) { @@ -1356,7 +1356,7 @@ public void testRecordReaderDelta() throws Exception { AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .bucket(BUCKET).inspector(inspector).filesystem(fs) - .writingBase(false).minimumTransactionId(1).maximumTransactionId(1) + 
.writingBase(false).minimumWriteId(1).maximumWriteId(1) .finalDestination(root); RecordUpdater ru = of.getRecordUpdater(root, options); String[][] values = {new String[]{"a", "b", "c", "d", "e"}, new String[]{"f", "g", "h", "i", "j"}}; @@ -1366,7 +1366,7 @@ public void testRecordReaderDelta() throws Exception { ru.close(false); // write a delta - options.minimumTransactionId(2).maximumTransactionId(2); + options.minimumWriteId(2).maximumWriteId(2); ru = of.getRecordUpdater(root, options); for(int i=0; i < values[1].length; ++i) { ru.insert(2, new MyRow(values[1][i])); @@ -1429,7 +1429,7 @@ private void testRecordReaderIncompleteDelta(boolean use130Format) throws Except // write a base AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) - .writingBase(true).minimumTransactionId(0).maximumTransactionId(0) + .writingBase(true).minimumWriteId(0).maximumWriteId(0) .bucket(BUCKET).inspector(inspector).filesystem(fs).finalDestination(root); if(!use130Format) { options.statementId(-1); @@ -1442,8 +1442,8 @@ private void testRecordReaderIncompleteDelta(boolean use130Format) throws Except ru.close(false); // write a delta - options.writingBase(false).minimumTransactionId(10) - .maximumTransactionId(19); + options.writingBase(false).minimumWriteId(10) + .maximumWriteId(19); ru = of.getRecordUpdater(root, options); values = new String[]{"6", "7", "8"}; for(int i=0; i < values.length; ++i) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java index 7914f0c..ef6dbbb 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java @@ -53,9 +53,9 @@ public void testAccessors() throws Exception { OrcStruct event = new OrcStruct(OrcRecordUpdater.FIELDS); event.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(OrcRecordUpdater.INSERT_OPERATION)); - event.setFieldValue(OrcRecordUpdater.CURRENT_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.CURRENT_WRITEID, new LongWritable(100)); - event.setFieldValue(OrcRecordUpdater.ORIGINAL_TRANSACTION, + event.setFieldValue(OrcRecordUpdater.ORIGINAL_WRITEID, new LongWritable(50)); event.setFieldValue(OrcRecordUpdater.BUCKET, new IntWritable(200)); event.setFieldValue(OrcRecordUpdater.ROW_ID, new LongWritable(300)); @@ -101,8 +101,8 @@ public void testWriter() throws Exception { .filesystem(fs) .bucket(10) .writingBase(false) - .minimumTransactionId(10) - .maximumTransactionId(19) + .minimumWriteId(10) + .maximumWriteId(19) .inspector(inspector) .reporter(Reporter.NULL) .finalDestination(root); @@ -210,8 +210,8 @@ public void testWriterTblProperties() throws Exception { .filesystem(fs) .bucket(10) .writingBase(false) - .minimumTransactionId(10) - .maximumTransactionId(19) + .minimumWriteId(10) + .maximumWriteId(19) .inspector(inspector) .reporter(Reporter.NULL) .finalDestination(root) @@ -252,8 +252,8 @@ public void testUpdates() throws Exception { .filesystem(fs) .bucket(bucket) .writingBase(false) - .minimumTransactionId(100) - .maximumTransactionId(100) + .minimumWriteId(100) + .maximumWriteId(100) .inspector(inspector) .reporter(Reporter.NULL) .recordIdColumn(1) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java index 65508f4..c82a088 100644 --- 
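Aside (illustrative only, not part of the patch): the writer-side changes above are pure renames, so a delta is still produced the same way, only keyed by a write id. A minimal sketch using the renamed builder methods, assuming conf, inspector, root and the tests' MyRow helper are set up as in TestOrcRawRecordMerger above:

    // Illustrative sketch, not part of the patch: write a single-write-id delta.
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .inspector(inspector)
        .bucket(0)
        .writingBase(false)
        .minimumWriteId(7)          // was minimumTransactionId(7)
        .maximumWriteId(7)          // was maximumTransactionId(7)
        .finalDestination(root);
    RecordUpdater ru = new OrcOutputFormat().getRecordUpdater(root, options);
    ru.insert(options.getMaximumWriteId(), new MyRow("only row"));  // first argument is now a write id
    ru.close(false);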
a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; @@ -117,8 +117,8 @@ public void setup() throws Exception { .filesystem(fs) .bucket(bucket) .writingBase(false) - .minimumTransactionId(1) - .maximumTransactionId(NUM_OTID) + .minimumWriteId(1) + .maximumWriteId(NUM_OTID) .inspector(inspector) .reporter(Reporter.NULL) .recordIdColumn(1) @@ -141,7 +141,7 @@ public void setup() throws Exception { // Create a delete delta that has rowIds divisible by 2 but not by 3. This will produce // a delete delta file with 50,000 delete events. long currTxnId = NUM_OTID + 1; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -154,7 +154,7 @@ public void setup() throws Exception { // Now, create a delete delta that has rowIds divisible by 3 but not by 2. This will produce // a delete delta file with 25,000 delete events. currTxnId = NUM_OTID + 2; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -167,7 +167,7 @@ public void setup() throws Exception { // Now, create a delete delta that has rowIds divisible by both 3 and 2. This will produce // a delete delta file with 25,000 delete events. currTxnId = NUM_OTID + 3; - options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId); + options.minimumWriteId(currTxnId).maximumWriteId(currTxnId); updater = new OrcRecordUpdater(root, options); for (long i = 1; i <= NUM_OTID; ++i) { for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) { @@ -216,7 +216,7 @@ private void testVectorizedOrcAcidRowBatchReader(String deleteEventRegistry) thr List splits = getSplits(); // Mark one of the transactions as an exception to test that invalid transactions // are being handled properly. 
- conf.set(ValidTxnList.VALID_TXNS_KEY, "14:1:1:5"); // Exclude transaction 5 + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:14:1:1:5"); // Exclude transaction 5 VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, new VectorizedRowBatchCtx()); if (deleteEventRegistry.equals(ColumnizedDeleteEventRegistry.class.getName())) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java index 3c007a7..a40ad24 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java @@ -164,7 +164,7 @@ public void testMergePathValidMoveWorkReturnsNewMoveWork() { TableDesc tableDesc = new TableDesc(); reset(mockWork); when(mockWork.getLoadTableWork()).thenReturn(new LoadTableDesc( - condOutputPath, tableDesc, null, null)); + condOutputPath, tableDesc, null)); newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState); assertNotNull(newWork); assertNotEquals(newWork, mockWork); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index c337fd5..3117faa 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -24,11 +24,15 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -41,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -69,6 +74,7 @@ import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -140,6 +146,7 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, boolean isTemporary) throws TException { Table table = new Table(); + table.setTableType(TableType.MANAGED_TABLE.name()); table.setTableName(tableName); table.setDbName(dbName); table.setOwner("me"); @@ -150,6 +157,16 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, 
table.setPartitionKeys(partKeys); } + // Set the table as transactional for compaction to work + if (parameters == null) { + parameters = new HashMap<>(); + } + parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); + if (sortCols != null) { + // Sort columns are not allowed for full ACID table. So, change it to insert-only table + parameters.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, + TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY); + } table.setParameters(parameters); if (isTemporary) table.setTemporary(true); @@ -181,6 +198,12 @@ protected long openTxn() throws MetaException { return txns.get(0); } + protected long allocateWriteId(String dbName, String tblName, long txnid) throws MetaException, TxnAbortedException, NoSuchTxnException { + AllocateTableWriteIdRequest awiRqst = new AllocateTableWriteIdRequest(Collections.singletonList(txnid), dbName, tblName); + AllocateTableWriteIdResponse awiResp = txnHandler.allocateTableWriteId(awiRqst); + return awiResp.getTxnToWriteIds().get(0).getWriteId(); + } + protected void addDeltaFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords) throws Exception { addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, 2, true); @@ -220,15 +243,19 @@ protected void addBaseFile(Table t, Partition p, long maxTxn, int numRecords, in return paths; } - protected void burnThroughTransactions(int num) + protected void burnThroughTransactions(String dbName, String tblName, int num) throws MetaException, NoSuchTxnException, TxnAbortedException { - burnThroughTransactions(num, null, null); + burnThroughTransactions(dbName, tblName, num, null, null); } - protected void burnThroughTransactions(int num, Set open, Set aborted) + protected void burnThroughTransactions(String dbName, String tblName, int num, Set open, Set aborted) throws MetaException, NoSuchTxnException, TxnAbortedException { OpenTxnsResponse rsp = txnHandler.openTxns(new OpenTxnRequest(num, "me", "localhost")); + AllocateTableWriteIdRequest awiRqst = new AllocateTableWriteIdRequest(rsp.getTxn_ids(), dbName, tblName); + AllocateTableWriteIdResponse awiResp = txnHandler.allocateTableWriteId(awiRqst); + int i = 0; for (long tid : rsp.getTxn_ids()) { + assert(awiResp.getTxnToWriteIds().get(i++).getTxnId() == tid); if (aborted != null && aborted.contains(tid)) { txnHandler.abortTxn(new AbortTxnRequest(tid)); } else if (open == null || (open != null && !open.contains(tid))) { @@ -350,7 +377,7 @@ private void addFile(Table t, Partition p, long minTxn, long maxTxn, @Override public RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, - ValidTxnList validTxnList, + ValidWriteIdList validWriteIdList, Path baseDirectory, Path... 
deltaDirectory) throws IOException { List filesToRead = new ArrayList(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index db8e46c..3ca073c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -76,7 +76,7 @@ public void cleanupAfterMajorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "camtc", 25); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -107,7 +107,7 @@ public void cleanupAfterMajorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "campc", 25); CompactionRequest rqst = new CompactionRequest("default", "campc", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -138,7 +138,7 @@ public void cleanupAfterMinorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "camitc", 25); CompactionRequest rqst = new CompactionRequest("default", "camitc", CompactionType.MINOR); txnHandler.compact(rqst); @@ -176,7 +176,7 @@ public void cleanupAfterMinorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "camipc", 25); CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -214,7 +214,7 @@ public void blockedByLockTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -251,7 +251,7 @@ public void blockedByLockPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblp", 25); CompactionRequest rqst = new CompactionRequest("default", "bblp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -296,7 +296,7 @@ public void notBlockedBySubsequentLock() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -368,7 +368,7 @@ public void partitionNotBlockedBySubsequentLock() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "bblt", 25); CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -439,7 +439,7 @@ public void cleanupAfterMajorPartitionCompactionNoBase() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "campcnb", 25); CompactionRequest rqst = new CompactionRequest("default", "campcnb", 
CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -469,7 +469,7 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "dt", 25); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR); txnHandler.compact(rqst); @@ -477,14 +477,14 @@ public void droppedTable() throws Exception { txnHandler.markCompacted(ci); txnHandler.setRunAs(ci.id, System.getProperty("user.name")); + // Dropping the table removes its entry from the compaction queue, so the cleaner has nothing left to do ms.dropTable("default", "dt"); startCleaner(); // Check there are no compactions requests left. ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); - Assert.assertEquals(1, rsp.getCompactsSize()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, rsp.getCompactsSize()); } @Test @@ -496,7 +496,7 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(25); + burnThroughTransactions("default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -505,14 +505,14 @@ public void droppedPartition() throws Exception { txnHandler.markCompacted(ci); txnHandler.setRunAs(ci.id, System.getProperty("user.name")); + // Dropping the partition removes its entry from the compaction queue, so the cleaner has nothing left to do ms.dropPartition("default", "dp", Collections.singletonList("today"), true); startCleaner(); // Check there are no compactions requests left. ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); - Assert.assertEquals(1, rsp.getCompactsSize()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, rsp.getCompactsSize()); } @Override boolean useHive130DeltaDirName() { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java index f35826e..35dc002 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java @@ -339,7 +339,7 @@ public void compactTableHighDeltaPct() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "cthdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -350,6 +350,8 @@ public void compactTableHighDeltaPct() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cthdp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -371,7 +373,7 @@ public void compactPartitionHighDeltaPct() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "cphdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -383,6 +385,8 @@ public void compactPartitionHighDeltaPct() throws
Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cphdp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -404,7 +408,7 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(53); + burnThroughTransactions("default", "nctdpnhe", 53); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -415,6 +419,8 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctdpnhe", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -440,7 +446,7 @@ public void compactTableTooManyDeltas() throws Exception { addDeltaFile(t, null, 210L, 210L, 1); addDeltaFile(t, null, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "cttmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -451,6 +457,8 @@ public void compactTableTooManyDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cttmd", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -481,7 +489,7 @@ public void compactPartitionTooManyDeltas() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "cptmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -493,6 +501,8 @@ public void compactPartitionTooManyDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cptmd", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -514,7 +524,7 @@ public void noCompactTableNotEnoughDeltas() throws Exception { addDeltaFile(t, null, 201L, 205L, 5); addDeltaFile(t, null, 206L, 211L, 6); - burnThroughTransactions(210); + burnThroughTransactions("default", "nctned", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -525,6 +535,8 @@ public void noCompactTableNotEnoughDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctned", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -550,7 +562,7 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { addDeltaFile(t, null, 300L, 310L, 11); addDeltaFile(t, null, 311L, 321L, 11); - burnThroughTransactions(320); + burnThroughTransactions("default", "cmomwbv", 320); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, 
"default"); @@ -561,6 +573,8 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "cmomwbv", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -590,7 +604,7 @@ public void enoughDeltasNoBase() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions(210); + burnThroughTransactions("default", "ednb", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -602,6 +616,8 @@ public void enoughDeltasNoBase() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "ednb", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -624,7 +640,7 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "ttospgocr", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -636,6 +652,8 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "ttospgocr", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); txnid = openTxn(); @@ -648,6 +666,8 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); res = txnHandler.lock(req); + writeid = allocateWriteId("default", "ttospgocr", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -670,7 +690,7 @@ public void noCompactTableDynamicPartitioning() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "nctdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -681,6 +701,8 @@ public void noCompactTableDynamicPartitioning() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "nctdp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); startInitiator(); @@ -698,7 +720,7 @@ public void dropTable() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "dt", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -709,6 +731,8 @@ public void dropTable() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "dt", txnid); + assert(writeid==txnid); 
txnHandler.commitTxn(new CommitTxnRequest(txnid)); ms.dropTable("default", "dt"); @@ -729,7 +753,7 @@ public void dropPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(23); + burnThroughTransactions("default", "dp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -741,6 +765,8 @@ public void dropPartition() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); + long writeid = allocateWriteId("default", "dp", txnid); + assert(writeid==txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); ms.dropPartition("default", "dp", Collections.singletonList("today"), true); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java index 0638126..9d3a79f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java @@ -236,7 +236,7 @@ public void sortedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "st", 25); CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR); txnHandler.compact(rqst); @@ -262,7 +262,7 @@ public void sortedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(25); + burnThroughTransactions("default", "sp", 25); CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -285,7 +285,7 @@ public void minorTableWithBase() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mtwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -344,7 +344,7 @@ public void minorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -380,7 +380,7 @@ public void minorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions("default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -416,7 +416,7 @@ public void minorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mpwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mpwb", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -469,7 +469,7 @@ public void minorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 
4L, 2); - burnThroughTransactions(5); + burnThroughTransactions("default", "mtnb", 5); CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -522,7 +522,7 @@ public void majorTableWithBase() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "matwb", 25); CompactionRequest rqst = new CompactionRequest("default", "matwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -598,7 +598,7 @@ private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception { * and then the 'requested' * minor compaction to combine delta_21_23, delta_25_33 and delta_35_35 to make delta_21_35 * or major compaction to create base_35*/ - burnThroughTransactions(35); + burnThroughTransactions("default", "mapwb", 35); CompactionRequest rqst = new CompactionRequest("default", "mapwb", type); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); @@ -690,7 +690,7 @@ public void majorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mapwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -734,7 +734,7 @@ public void majorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions(4); + burnThroughTransactions("default", "matnb", 4); CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -778,7 +778,7 @@ public void majorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "matl", 25); CompactionRequest rqst = new CompactionRequest("default", "matl", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -822,7 +822,7 @@ public void minorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "mtl", 25); CompactionRequest rqst = new CompactionRequest("default", "mtl", CompactionType.MINOR); txnHandler.compact(rqst); @@ -865,7 +865,7 @@ public void majorPartitionWithBaseMissingBuckets() throws Exception { addDeltaFile(t, p, 21L, 22L, 2, 2, false); addDeltaFile(t, p, 23L, 26L, 4); - burnThroughTransactions(27); + burnThroughTransactions("default", "mapwbmb", 27); CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -919,7 +919,7 @@ public void majorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -955,7 +955,7 @@ public void majorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions(27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions("default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 
25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -991,19 +991,19 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions(4); + burnThroughTransactions("default", "dt", 4); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR); txnHandler.compact(rqst); + // Dropping the table removes its entry from the compaction queue, so the worker has nothing left to do ms.dropTable("default", "dt"); startWorker(); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); List compacts = rsp.getCompacts(); - Assert.assertEquals(1, compacts.size()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(compacts.get(0).getState())); + Assert.assertEquals(0, compacts.size()); } @Test @@ -1015,20 +1015,20 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions(25); + burnThroughTransactions("default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); + // Dropping the partition removes its entry from the compaction queue, so the worker has nothing left to do ms.dropPartition("default", "dp", Collections.singletonList("today"), true); startWorker(); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); List compacts = rsp.getCompacts(); - Assert.assertEquals(1, compacts.size()); - Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState())); + Assert.assertEquals(0, compacts.size()); } @After diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 8b78230..925e93e 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1056; - ::apache::thrift::protocol::TType _etype1059; - xfer += iprot->readListBegin(_etype1059, _size1056); - this->success.resize(_size1056); - uint32_t _i1060; - for (_i1060 = 0; _i1060 < _size1056; ++_i1060) + uint32_t _size1100; + ::apache::thrift::protocol::TType _etype1103; + xfer += iprot->readListBegin(_etype1103, _size1100); + this->success.resize(_size1100); + uint32_t _i1104; + for (_i1104 = 0; _i1104 < _size1100; ++_i1104) { - xfer += iprot->readString(this->success[_i1060]); + xfer += iprot->readString(this->success[_i1104]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1061; - for (_iter1061 = this->success.begin(); _iter1061 != this->success.end(); ++_iter1061) + std::vector ::const_iterator _iter1105; + for (_iter1105 = this->success.begin(); _iter1105 != this->success.end(); ++_iter1105) { - xfer += oprot->writeString((*_iter1061)); + xfer += oprot->writeString((*_iter1105)); } xfer += oprot->writeListEnd(); } @@ -1334,14
+1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1062; - ::apache::thrift::protocol::TType _etype1065; - xfer += iprot->readListBegin(_etype1065, _size1062); - (*(this->success)).resize(_size1062); - uint32_t _i1066; - for (_i1066 = 0; _i1066 < _size1062; ++_i1066) + uint32_t _size1106; + ::apache::thrift::protocol::TType _etype1109; + xfer += iprot->readListBegin(_etype1109, _size1106); + (*(this->success)).resize(_size1106); + uint32_t _i1110; + for (_i1110 = 0; _i1110 < _size1106; ++_i1110) { - xfer += iprot->readString((*(this->success))[_i1066]); + xfer += iprot->readString((*(this->success))[_i1110]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1067; - ::apache::thrift::protocol::TType _etype1070; - xfer += iprot->readListBegin(_etype1070, _size1067); - this->success.resize(_size1067); - uint32_t _i1071; - for (_i1071 = 0; _i1071 < _size1067; ++_i1071) + uint32_t _size1111; + ::apache::thrift::protocol::TType _etype1114; + xfer += iprot->readListBegin(_etype1114, _size1111); + this->success.resize(_size1111); + uint32_t _i1115; + for (_i1115 = 0; _i1115 < _size1111; ++_i1115) { - xfer += iprot->readString(this->success[_i1071]); + xfer += iprot->readString(this->success[_i1115]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1072; - for (_iter1072 = this->success.begin(); _iter1072 != this->success.end(); ++_iter1072) + std::vector ::const_iterator _iter1116; + for (_iter1116 = this->success.begin(); _iter1116 != this->success.end(); ++_iter1116) { - xfer += oprot->writeString((*_iter1072)); + xfer += oprot->writeString((*_iter1116)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1073; - ::apache::thrift::protocol::TType _etype1076; - xfer += iprot->readListBegin(_etype1076, _size1073); - (*(this->success)).resize(_size1073); - uint32_t _i1077; - for (_i1077 = 0; _i1077 < _size1073; ++_i1077) + uint32_t _size1117; + ::apache::thrift::protocol::TType _etype1120; + xfer += iprot->readListBegin(_etype1120, _size1117); + (*(this->success)).resize(_size1117); + uint32_t _i1121; + for (_i1121 = 0; _i1121 < _size1117; ++_i1121) { - xfer += iprot->readString((*(this->success))[_i1077]); + xfer += iprot->readString((*(this->success))[_i1121]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1078; - ::apache::thrift::protocol::TType _ktype1079; - ::apache::thrift::protocol::TType _vtype1080; - xfer += iprot->readMapBegin(_ktype1079, _vtype1080, _size1078); - uint32_t _i1082; - for (_i1082 = 0; _i1082 < _size1078; ++_i1082) + uint32_t _size1122; + ::apache::thrift::protocol::TType _ktype1123; + 
::apache::thrift::protocol::TType _vtype1124; + xfer += iprot->readMapBegin(_ktype1123, _vtype1124, _size1122); + uint32_t _i1126; + for (_i1126 = 0; _i1126 < _size1122; ++_i1126) { - std::string _key1083; - xfer += iprot->readString(_key1083); - Type& _val1084 = this->success[_key1083]; - xfer += _val1084.read(iprot); + std::string _key1127; + xfer += iprot->readString(_key1127); + Type& _val1128 = this->success[_key1127]; + xfer += _val1128.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1085; - for (_iter1085 = this->success.begin(); _iter1085 != this->success.end(); ++_iter1085) + std::map ::const_iterator _iter1129; + for (_iter1129 = this->success.begin(); _iter1129 != this->success.end(); ++_iter1129) { - xfer += oprot->writeString(_iter1085->first); - xfer += _iter1085->second.write(oprot); + xfer += oprot->writeString(_iter1129->first); + xfer += _iter1129->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1086; - ::apache::thrift::protocol::TType _ktype1087; - ::apache::thrift::protocol::TType _vtype1088; - xfer += iprot->readMapBegin(_ktype1087, _vtype1088, _size1086); - uint32_t _i1090; - for (_i1090 = 0; _i1090 < _size1086; ++_i1090) + uint32_t _size1130; + ::apache::thrift::protocol::TType _ktype1131; + ::apache::thrift::protocol::TType _vtype1132; + xfer += iprot->readMapBegin(_ktype1131, _vtype1132, _size1130); + uint32_t _i1134; + for (_i1134 = 0; _i1134 < _size1130; ++_i1134) { - std::string _key1091; - xfer += iprot->readString(_key1091); - Type& _val1092 = (*(this->success))[_key1091]; - xfer += _val1092.read(iprot); + std::string _key1135; + xfer += iprot->readString(_key1135); + Type& _val1136 = (*(this->success))[_key1135]; + xfer += _val1136.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1093; - ::apache::thrift::protocol::TType _etype1096; - xfer += iprot->readListBegin(_etype1096, _size1093); - this->success.resize(_size1093); - uint32_t _i1097; - for (_i1097 = 0; _i1097 < _size1093; ++_i1097) + uint32_t _size1137; + ::apache::thrift::protocol::TType _etype1140; + xfer += iprot->readListBegin(_etype1140, _size1137); + this->success.resize(_size1137); + uint32_t _i1141; + for (_i1141 = 0; _i1141 < _size1137; ++_i1141) { - xfer += this->success[_i1097].read(iprot); + xfer += this->success[_i1141].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1098; - for (_iter1098 = this->success.begin(); _iter1098 != this->success.end(); ++_iter1098) + std::vector ::const_iterator _iter1142; + for (_iter1142 = 
this->success.begin(); _iter1142 != this->success.end(); ++_iter1142) { - xfer += (*_iter1098).write(oprot); + xfer += (*_iter1142).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1099; - ::apache::thrift::protocol::TType _etype1102; - xfer += iprot->readListBegin(_etype1102, _size1099); - (*(this->success)).resize(_size1099); - uint32_t _i1103; - for (_i1103 = 0; _i1103 < _size1099; ++_i1103) + uint32_t _size1143; + ::apache::thrift::protocol::TType _etype1146; + xfer += iprot->readListBegin(_etype1146, _size1143); + (*(this->success)).resize(_size1143); + uint32_t _i1147; + for (_i1147 = 0; _i1147 < _size1143; ++_i1147) { - xfer += (*(this->success))[_i1103].read(iprot); + xfer += (*(this->success))[_i1147].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1104; - ::apache::thrift::protocol::TType _etype1107; - xfer += iprot->readListBegin(_etype1107, _size1104); - this->success.resize(_size1104); - uint32_t _i1108; - for (_i1108 = 0; _i1108 < _size1104; ++_i1108) + uint32_t _size1148; + ::apache::thrift::protocol::TType _etype1151; + xfer += iprot->readListBegin(_etype1151, _size1148); + this->success.resize(_size1148); + uint32_t _i1152; + for (_i1152 = 0; _i1152 < _size1148; ++_i1152) { - xfer += this->success[_i1108].read(iprot); + xfer += this->success[_i1152].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1109; - for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109) + std::vector ::const_iterator _iter1153; + for (_iter1153 = this->success.begin(); _iter1153 != this->success.end(); ++_iter1153) { - xfer += (*_iter1109).write(oprot); + xfer += (*_iter1153).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1110; - ::apache::thrift::protocol::TType _etype1113; - xfer += iprot->readListBegin(_etype1113, _size1110); - (*(this->success)).resize(_size1110); - uint32_t _i1114; - for (_i1114 = 0; _i1114 < _size1110; ++_i1114) + uint32_t _size1154; + ::apache::thrift::protocol::TType _etype1157; + xfer += iprot->readListBegin(_etype1157, _size1154); + (*(this->success)).resize(_size1154); + uint32_t _i1158; + for (_i1158 = 0; _i1158 < _size1154; ++_i1158) { - xfer += (*(this->success))[_i1114].read(iprot); + xfer += (*(this->success))[_i1158].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1115; - ::apache::thrift::protocol::TType _etype1118; - xfer += iprot->readListBegin(_etype1118, _size1115); - this->success.resize(_size1115); - uint32_t _i1119; - for 
(_i1119 = 0; _i1119 < _size1115; ++_i1119) + uint32_t _size1159; + ::apache::thrift::protocol::TType _etype1162; + xfer += iprot->readListBegin(_etype1162, _size1159); + this->success.resize(_size1159); + uint32_t _i1163; + for (_i1163 = 0; _i1163 < _size1159; ++_i1163) { - xfer += this->success[_i1119].read(iprot); + xfer += this->success[_i1163].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1120; - for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120) + std::vector ::const_iterator _iter1164; + for (_iter1164 = this->success.begin(); _iter1164 != this->success.end(); ++_iter1164) { - xfer += (*_iter1120).write(oprot); + xfer += (*_iter1164).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1121; - ::apache::thrift::protocol::TType _etype1124; - xfer += iprot->readListBegin(_etype1124, _size1121); - (*(this->success)).resize(_size1121); - uint32_t _i1125; - for (_i1125 = 0; _i1125 < _size1121; ++_i1125) + uint32_t _size1165; + ::apache::thrift::protocol::TType _etype1168; + xfer += iprot->readListBegin(_etype1168, _size1165); + (*(this->success)).resize(_size1165); + uint32_t _i1169; + for (_i1169 = 0; _i1169 < _size1165; ++_i1169) { - xfer += (*(this->success))[_i1125].read(iprot); + xfer += (*(this->success))[_i1169].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1126; - ::apache::thrift::protocol::TType _etype1129; - xfer += iprot->readListBegin(_etype1129, _size1126); - this->success.resize(_size1126); - uint32_t _i1130; - for (_i1130 = 0; _i1130 < _size1126; ++_i1130) + uint32_t _size1170; + ::apache::thrift::protocol::TType _etype1173; + xfer += iprot->readListBegin(_etype1173, _size1170); + this->success.resize(_size1170); + uint32_t _i1174; + for (_i1174 = 0; _i1174 < _size1170; ++_i1174) { - xfer += this->success[_i1130].read(iprot); + xfer += this->success[_i1174].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1131; - for (_iter1131 = this->success.begin(); _iter1131 != this->success.end(); ++_iter1131) + std::vector ::const_iterator _iter1175; + for (_iter1175 = this->success.begin(); _iter1175 != this->success.end(); ++_iter1175) { - xfer += (*_iter1131).write(oprot); + xfer += (*_iter1175).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1132; - ::apache::thrift::protocol::TType _etype1135; - xfer += 
iprot->readListBegin(_etype1135, _size1132); - (*(this->success)).resize(_size1132); - uint32_t _i1136; - for (_i1136 = 0; _i1136 < _size1132; ++_i1136) + uint32_t _size1176; + ::apache::thrift::protocol::TType _etype1179; + xfer += iprot->readListBegin(_etype1179, _size1176); + (*(this->success)).resize(_size1176); + uint32_t _i1180; + for (_i1180 = 0; _i1180 < _size1176; ++_i1180) { - xfer += (*(this->success))[_i1136].read(iprot); + xfer += (*(this->success))[_i1180].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1137; - ::apache::thrift::protocol::TType _etype1140; - xfer += iprot->readListBegin(_etype1140, _size1137); - this->primaryKeys.resize(_size1137); - uint32_t _i1141; - for (_i1141 = 0; _i1141 < _size1137; ++_i1141) + uint32_t _size1181; + ::apache::thrift::protocol::TType _etype1184; + xfer += iprot->readListBegin(_etype1184, _size1181); + this->primaryKeys.resize(_size1181); + uint32_t _i1185; + for (_i1185 = 0; _i1185 < _size1181; ++_i1185) { - xfer += this->primaryKeys[_i1141].read(iprot); + xfer += this->primaryKeys[_i1185].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1142; - ::apache::thrift::protocol::TType _etype1145; - xfer += iprot->readListBegin(_etype1145, _size1142); - this->foreignKeys.resize(_size1142); - uint32_t _i1146; - for (_i1146 = 0; _i1146 < _size1142; ++_i1146) + uint32_t _size1186; + ::apache::thrift::protocol::TType _etype1189; + xfer += iprot->readListBegin(_etype1189, _size1186); + this->foreignKeys.resize(_size1186); + uint32_t _i1190; + for (_i1190 = 0; _i1190 < _size1186; ++_i1190) { - xfer += this->foreignKeys[_i1146].read(iprot); + xfer += this->foreignKeys[_i1190].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1147; - ::apache::thrift::protocol::TType _etype1150; - xfer += iprot->readListBegin(_etype1150, _size1147); - this->uniqueConstraints.resize(_size1147); - uint32_t _i1151; - for (_i1151 = 0; _i1151 < _size1147; ++_i1151) + uint32_t _size1191; + ::apache::thrift::protocol::TType _etype1194; + xfer += iprot->readListBegin(_etype1194, _size1191); + this->uniqueConstraints.resize(_size1191); + uint32_t _i1195; + for (_i1195 = 0; _i1195 < _size1191; ++_i1195) { - xfer += this->uniqueConstraints[_i1151].read(iprot); + xfer += this->uniqueConstraints[_i1195].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1152; - ::apache::thrift::protocol::TType _etype1155; - xfer += iprot->readListBegin(_etype1155, _size1152); - this->notNullConstraints.resize(_size1152); - uint32_t _i1156; - for (_i1156 = 0; _i1156 < _size1152; ++_i1156) + uint32_t _size1196; + ::apache::thrift::protocol::TType _etype1199; + xfer += iprot->readListBegin(_etype1199, _size1196); + this->notNullConstraints.resize(_size1196); + uint32_t _i1200; + for (_i1200 = 0; _i1200 < 
_size1196; ++_i1200) { - xfer += this->notNullConstraints[_i1156].read(iprot); + xfer += this->notNullConstraints[_i1200].read(iprot); } xfer += iprot->readListEnd(); } @@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1157; - for (_iter1157 = this->primaryKeys.begin(); _iter1157 != this->primaryKeys.end(); ++_iter1157) + std::vector ::const_iterator _iter1201; + for (_iter1201 = this->primaryKeys.begin(); _iter1201 != this->primaryKeys.end(); ++_iter1201) { - xfer += (*_iter1157).write(oprot); + xfer += (*_iter1201).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1158; - for (_iter1158 = this->foreignKeys.begin(); _iter1158 != this->foreignKeys.end(); ++_iter1158) + std::vector ::const_iterator _iter1202; + for (_iter1202 = this->foreignKeys.begin(); _iter1202 != this->foreignKeys.end(); ++_iter1202) { - xfer += (*_iter1158).write(oprot); + xfer += (*_iter1202).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1159; - for (_iter1159 = this->uniqueConstraints.begin(); _iter1159 != this->uniqueConstraints.end(); ++_iter1159) + std::vector ::const_iterator _iter1203; + for (_iter1203 = this->uniqueConstraints.begin(); _iter1203 != this->uniqueConstraints.end(); ++_iter1203) { - xfer += (*_iter1159).write(oprot); + xfer += (*_iter1203).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1160; - for (_iter1160 = this->notNullConstraints.begin(); _iter1160 != this->notNullConstraints.end(); ++_iter1160) + std::vector ::const_iterator _iter1204; + for (_iter1204 = this->notNullConstraints.begin(); _iter1204 != this->notNullConstraints.end(); ++_iter1204) { - xfer += (*_iter1160).write(oprot); + xfer += (*_iter1204).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1161; - for (_iter1161 = (*(this->primaryKeys)).begin(); _iter1161 != (*(this->primaryKeys)).end(); ++_iter1161) + std::vector ::const_iterator _iter1205; + for (_iter1205 = (*(this->primaryKeys)).begin(); 
_iter1205 != (*(this->primaryKeys)).end(); ++_iter1205) { - xfer += (*_iter1161).write(oprot); + xfer += (*_iter1205).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1162; - for (_iter1162 = (*(this->foreignKeys)).begin(); _iter1162 != (*(this->foreignKeys)).end(); ++_iter1162) + std::vector ::const_iterator _iter1206; + for (_iter1206 = (*(this->foreignKeys)).begin(); _iter1206 != (*(this->foreignKeys)).end(); ++_iter1206) { - xfer += (*_iter1162).write(oprot); + xfer += (*_iter1206).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1163; - for (_iter1163 = (*(this->uniqueConstraints)).begin(); _iter1163 != (*(this->uniqueConstraints)).end(); ++_iter1163) + std::vector ::const_iterator _iter1207; + for (_iter1207 = (*(this->uniqueConstraints)).begin(); _iter1207 != (*(this->uniqueConstraints)).end(); ++_iter1207) { - xfer += (*_iter1163).write(oprot); + xfer += (*_iter1207).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1164; - for (_iter1164 = (*(this->notNullConstraints)).begin(); _iter1164 != (*(this->notNullConstraints)).end(); ++_iter1164) + std::vector ::const_iterator _iter1208; + for (_iter1208 = (*(this->notNullConstraints)).begin(); _iter1208 != (*(this->notNullConstraints)).end(); ++_iter1208) { - xfer += (*_iter1164).write(oprot); + xfer += (*_iter1208).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1165; - ::apache::thrift::protocol::TType _etype1168; - xfer += iprot->readListBegin(_etype1168, _size1165); - this->partNames.resize(_size1165); - uint32_t _i1169; - for (_i1169 = 0; _i1169 < _size1165; ++_i1169) + uint32_t _size1209; + ::apache::thrift::protocol::TType _etype1212; + xfer += iprot->readListBegin(_etype1212, _size1209); + this->partNames.resize(_size1209); + uint32_t _i1213; + for (_i1213 = 0; _i1213 < _size1209; ++_i1213) { - xfer += iprot->readString(this->partNames[_i1169]); + xfer += iprot->readString(this->partNames[_i1213]); } xfer += iprot->readListEnd(); } @@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1170; - for (_iter1170 = 
this->partNames.begin(); _iter1170 != this->partNames.end(); ++_iter1170) + std::vector ::const_iterator _iter1214; + for (_iter1214 = this->partNames.begin(); _iter1214 != this->partNames.end(); ++_iter1214) { - xfer += oprot->writeString((*_iter1170)); + xfer += oprot->writeString((*_iter1214)); } xfer += oprot->writeListEnd(); } @@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1171; - for (_iter1171 = (*(this->partNames)).begin(); _iter1171 != (*(this->partNames)).end(); ++_iter1171) + std::vector ::const_iterator _iter1215; + for (_iter1215 = (*(this->partNames)).begin(); _iter1215 != (*(this->partNames)).end(); ++_iter1215) { - xfer += oprot->writeString((*_iter1171)); + xfer += oprot->writeString((*_iter1215)); } xfer += oprot->writeListEnd(); } @@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1172; - ::apache::thrift::protocol::TType _etype1175; - xfer += iprot->readListBegin(_etype1175, _size1172); - this->success.resize(_size1172); - uint32_t _i1176; - for (_i1176 = 0; _i1176 < _size1172; ++_i1176) + uint32_t _size1216; + ::apache::thrift::protocol::TType _etype1219; + xfer += iprot->readListBegin(_etype1219, _size1216); + this->success.resize(_size1216); + uint32_t _i1220; + for (_i1220 = 0; _i1220 < _size1216; ++_i1220) { - xfer += iprot->readString(this->success[_i1176]); + xfer += iprot->readString(this->success[_i1220]); } xfer += iprot->readListEnd(); } @@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1177; - for (_iter1177 = this->success.begin(); _iter1177 != this->success.end(); ++_iter1177) + std::vector ::const_iterator _iter1221; + for (_iter1221 = this->success.begin(); _iter1221 != this->success.end(); ++_iter1221) { - xfer += oprot->writeString((*_iter1177)); + xfer += oprot->writeString((*_iter1221)); } xfer += oprot->writeListEnd(); } @@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1178; - ::apache::thrift::protocol::TType _etype1181; - xfer += iprot->readListBegin(_etype1181, _size1178); - (*(this->success)).resize(_size1178); - uint32_t _i1182; - for (_i1182 = 0; _i1182 < _size1178; ++_i1182) + uint32_t _size1222; + ::apache::thrift::protocol::TType _etype1225; + xfer += iprot->readListBegin(_etype1225, _size1222); + (*(this->success)).resize(_size1222); + uint32_t _i1226; + for (_i1226 = 0; _i1226 < _size1222; ++_i1226) { - xfer += iprot->readString((*(this->success))[_i1182]); + xfer += iprot->readString((*(this->success))[_i1226]); } xfer += iprot->readListEnd(); } @@ -7075,14 +7075,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1183; - ::apache::thrift::protocol::TType 
_etype1186; - xfer += iprot->readListBegin(_etype1186, _size1183); - this->success.resize(_size1183); - uint32_t _i1187; - for (_i1187 = 0; _i1187 < _size1183; ++_i1187) + uint32_t _size1227; + ::apache::thrift::protocol::TType _etype1230; + xfer += iprot->readListBegin(_etype1230, _size1227); + this->success.resize(_size1227); + uint32_t _i1231; + for (_i1231 = 0; _i1231 < _size1227; ++_i1231) { - xfer += iprot->readString(this->success[_i1187]); + xfer += iprot->readString(this->success[_i1231]); } xfer += iprot->readListEnd(); } @@ -7121,10 +7121,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1188; - for (_iter1188 = this->success.begin(); _iter1188 != this->success.end(); ++_iter1188) + std::vector ::const_iterator _iter1232; + for (_iter1232 = this->success.begin(); _iter1232 != this->success.end(); ++_iter1232) { - xfer += oprot->writeString((*_iter1188)); + xfer += oprot->writeString((*_iter1232)); } xfer += oprot->writeListEnd(); } @@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1189; - ::apache::thrift::protocol::TType _etype1192; - xfer += iprot->readListBegin(_etype1192, _size1189); - (*(this->success)).resize(_size1189); - uint32_t _i1193; - for (_i1193 = 0; _i1193 < _size1189; ++_i1193) + uint32_t _size1233; + ::apache::thrift::protocol::TType _etype1236; + xfer += iprot->readListBegin(_etype1236, _size1233); + (*(this->success)).resize(_size1233); + uint32_t _i1237; + for (_i1237 = 0; _i1237 < _size1233; ++_i1237) { - xfer += iprot->readString((*(this->success))[_i1193]); + xfer += iprot->readString((*(this->success))[_i1237]); } xfer += iprot->readListEnd(); } @@ -7314,14 +7314,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1194; - ::apache::thrift::protocol::TType _etype1197; - xfer += iprot->readListBegin(_etype1197, _size1194); - this->success.resize(_size1194); - uint32_t _i1198; - for (_i1198 = 0; _i1198 < _size1194; ++_i1198) + uint32_t _size1238; + ::apache::thrift::protocol::TType _etype1241; + xfer += iprot->readListBegin(_etype1241, _size1238); + this->success.resize(_size1238); + uint32_t _i1242; + for (_i1242 = 0; _i1242 < _size1238; ++_i1242) { - xfer += iprot->readString(this->success[_i1198]); + xfer += iprot->readString(this->success[_i1242]); } xfer += iprot->readListEnd(); } @@ -7360,10 +7360,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1199; - for (_iter1199 = this->success.begin(); _iter1199 != this->success.end(); ++_iter1199) + std::vector ::const_iterator _iter1243; + for (_iter1243 = this->success.begin(); _iter1243 != this->success.end(); ++_iter1243) { - xfer += oprot->writeString((*_iter1199)); + xfer += oprot->writeString((*_iter1243)); } xfer += oprot->writeListEnd(); } @@ -7408,14 +7408,14 @@ uint32_t 
ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1200; - ::apache::thrift::protocol::TType _etype1203; - xfer += iprot->readListBegin(_etype1203, _size1200); - (*(this->success)).resize(_size1200); - uint32_t _i1204; - for (_i1204 = 0; _i1204 < _size1200; ++_i1204) + uint32_t _size1244; + ::apache::thrift::protocol::TType _etype1247; + xfer += iprot->readListBegin(_etype1247, _size1244); + (*(this->success)).resize(_size1244); + uint32_t _i1248; + for (_i1248 = 0; _i1248 < _size1244; ++_i1248) { - xfer += iprot->readString((*(this->success))[_i1204]); + xfer += iprot->readString((*(this->success))[_i1248]); } xfer += iprot->readListEnd(); } @@ -7490,14 +7490,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1205; - ::apache::thrift::protocol::TType _etype1208; - xfer += iprot->readListBegin(_etype1208, _size1205); - this->tbl_types.resize(_size1205); - uint32_t _i1209; - for (_i1209 = 0; _i1209 < _size1205; ++_i1209) + uint32_t _size1249; + ::apache::thrift::protocol::TType _etype1252; + xfer += iprot->readListBegin(_etype1252, _size1249); + this->tbl_types.resize(_size1249); + uint32_t _i1253; + for (_i1253 = 0; _i1253 < _size1249; ++_i1253) { - xfer += iprot->readString(this->tbl_types[_i1209]); + xfer += iprot->readString(this->tbl_types[_i1253]); } xfer += iprot->readListEnd(); } @@ -7534,10 +7534,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1210; - for (_iter1210 = this->tbl_types.begin(); _iter1210 != this->tbl_types.end(); ++_iter1210) + std::vector ::const_iterator _iter1254; + for (_iter1254 = this->tbl_types.begin(); _iter1254 != this->tbl_types.end(); ++_iter1254) { - xfer += oprot->writeString((*_iter1210)); + xfer += oprot->writeString((*_iter1254)); } xfer += oprot->writeListEnd(); } @@ -7569,10 +7569,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1211; - for (_iter1211 = (*(this->tbl_types)).begin(); _iter1211 != (*(this->tbl_types)).end(); ++_iter1211) + std::vector ::const_iterator _iter1255; + for (_iter1255 = (*(this->tbl_types)).begin(); _iter1255 != (*(this->tbl_types)).end(); ++_iter1255) { - xfer += oprot->writeString((*_iter1211)); + xfer += oprot->writeString((*_iter1255)); } xfer += oprot->writeListEnd(); } @@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1212; - ::apache::thrift::protocol::TType _etype1215; - xfer += iprot->readListBegin(_etype1215, _size1212); - this->success.resize(_size1212); - uint32_t _i1216; - for (_i1216 = 0; _i1216 < _size1212; ++_i1216) + uint32_t _size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + this->success.resize(_size1256); + uint32_t _i1260; + 
for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - xfer += this->success[_i1216].read(iprot); + xfer += this->success[_i1260].read(iprot); } xfer += iprot->readListEnd(); } @@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1217; - for (_iter1217 = this->success.begin(); _iter1217 != this->success.end(); ++_iter1217) + std::vector ::const_iterator _iter1261; + for (_iter1261 = this->success.begin(); _iter1261 != this->success.end(); ++_iter1261) { - xfer += (*_iter1217).write(oprot); + xfer += (*_iter1261).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1218; - ::apache::thrift::protocol::TType _etype1221; - xfer += iprot->readListBegin(_etype1221, _size1218); - (*(this->success)).resize(_size1218); - uint32_t _i1222; - for (_i1222 = 0; _i1222 < _size1218; ++_i1222) + uint32_t _size1262; + ::apache::thrift::protocol::TType _etype1265; + xfer += iprot->readListBegin(_etype1265, _size1262); + (*(this->success)).resize(_size1262); + uint32_t _i1266; + for (_i1266 = 0; _i1266 < _size1262; ++_i1266) { - xfer += (*(this->success))[_i1222].read(iprot); + xfer += (*(this->success))[_i1266].read(iprot); } xfer += iprot->readListEnd(); } @@ -7852,14 +7852,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1223; - ::apache::thrift::protocol::TType _etype1226; - xfer += iprot->readListBegin(_etype1226, _size1223); - this->success.resize(_size1223); - uint32_t _i1227; - for (_i1227 = 0; _i1227 < _size1223; ++_i1227) + uint32_t _size1267; + ::apache::thrift::protocol::TType _etype1270; + xfer += iprot->readListBegin(_etype1270, _size1267); + this->success.resize(_size1267); + uint32_t _i1271; + for (_i1271 = 0; _i1271 < _size1267; ++_i1271) { - xfer += iprot->readString(this->success[_i1227]); + xfer += iprot->readString(this->success[_i1271]); } xfer += iprot->readListEnd(); } @@ -7898,10 +7898,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1228; - for (_iter1228 = this->success.begin(); _iter1228 != this->success.end(); ++_iter1228) + std::vector ::const_iterator _iter1272; + for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272) { - xfer += oprot->writeString((*_iter1228)); + xfer += oprot->writeString((*_iter1272)); } xfer += oprot->writeListEnd(); } @@ -7946,14 +7946,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1229; - ::apache::thrift::protocol::TType _etype1232; - xfer += iprot->readListBegin(_etype1232, _size1229); - (*(this->success)).resize(_size1229); - uint32_t _i1233; - for (_i1233 = 0; _i1233 < _size1229; ++_i1233) + uint32_t _size1273; + 
::apache::thrift::protocol::TType _etype1276; + xfer += iprot->readListBegin(_etype1276, _size1273); + (*(this->success)).resize(_size1273); + uint32_t _i1277; + for (_i1277 = 0; _i1277 < _size1273; ++_i1277) { - xfer += iprot->readString((*(this->success))[_i1233]); + xfer += iprot->readString((*(this->success))[_i1277]); } xfer += iprot->readListEnd(); } @@ -8263,14 +8263,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1234; - ::apache::thrift::protocol::TType _etype1237; - xfer += iprot->readListBegin(_etype1237, _size1234); - this->tbl_names.resize(_size1234); - uint32_t _i1238; - for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + uint32_t _size1278; + ::apache::thrift::protocol::TType _etype1281; + xfer += iprot->readListBegin(_etype1281, _size1278); + this->tbl_names.resize(_size1278); + uint32_t _i1282; + for (_i1282 = 0; _i1282 < _size1278; ++_i1282) { - xfer += iprot->readString(this->tbl_names[_i1238]); + xfer += iprot->readString(this->tbl_names[_i1282]); } xfer += iprot->readListEnd(); } @@ -8303,10 +8303,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1239; - for (_iter1239 = this->tbl_names.begin(); _iter1239 != this->tbl_names.end(); ++_iter1239) + std::vector ::const_iterator _iter1283; + for (_iter1283 = this->tbl_names.begin(); _iter1283 != this->tbl_names.end(); ++_iter1283) { - xfer += oprot->writeString((*_iter1239)); + xfer += oprot->writeString((*_iter1283)); } xfer += oprot->writeListEnd(); } @@ -8334,10 +8334,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1240; - for (_iter1240 = (*(this->tbl_names)).begin(); _iter1240 != (*(this->tbl_names)).end(); ++_iter1240) + std::vector ::const_iterator _iter1284; + for (_iter1284 = (*(this->tbl_names)).begin(); _iter1284 != (*(this->tbl_names)).end(); ++_iter1284) { - xfer += oprot->writeString((*_iter1240)); + xfer += oprot->writeString((*_iter1284)); } xfer += oprot->writeListEnd(); } @@ -8378,14 +8378,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1241; - ::apache::thrift::protocol::TType _etype1244; - xfer += iprot->readListBegin(_etype1244, _size1241); - this->success.resize(_size1241); - uint32_t _i1245; - for (_i1245 = 0; _i1245 < _size1241; ++_i1245) + uint32_t _size1285; + ::apache::thrift::protocol::TType _etype1288; + xfer += iprot->readListBegin(_etype1288, _size1285); + this->success.resize(_size1285); + uint32_t _i1289; + for (_i1289 = 0; _i1289 < _size1285; ++_i1289) { - xfer += this->success[_i1245].read(iprot); + xfer += this->success[_i1289].read(iprot); } xfer += iprot->readListEnd(); } @@ -8416,10 +8416,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = this->success.begin(); _iter1246 != this->success.end(); ++_iter1246) + std::vector
::const_iterator _iter1290; + for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) { - xfer += (*_iter1246).write(oprot); + xfer += (*_iter1290).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8460,14 +8460,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _etype1250; - xfer += iprot->readListBegin(_etype1250, _size1247); - (*(this->success)).resize(_size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1291; + ::apache::thrift::protocol::TType _etype1294; + xfer += iprot->readListBegin(_etype1294, _size1291); + (*(this->success)).resize(_size1291); + uint32_t _i1295; + for (_i1295 = 0; _i1295 < _size1291; ++_i1295) { - xfer += (*(this->success))[_i1251].read(iprot); + xfer += (*(this->success))[_i1295].read(iprot); } xfer += iprot->readListEnd(); } @@ -9000,14 +9000,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1252; - ::apache::thrift::protocol::TType _etype1255; - xfer += iprot->readListBegin(_etype1255, _size1252); - this->tbl_names.resize(_size1252); - uint32_t _i1256; - for (_i1256 = 0; _i1256 < _size1252; ++_i1256) + uint32_t _size1296; + ::apache::thrift::protocol::TType _etype1299; + xfer += iprot->readListBegin(_etype1299, _size1296); + this->tbl_names.resize(_size1296); + uint32_t _i1300; + for (_i1300 = 0; _i1300 < _size1296; ++_i1300) { - xfer += iprot->readString(this->tbl_names[_i1256]); + xfer += iprot->readString(this->tbl_names[_i1300]); } xfer += iprot->readListEnd(); } @@ -9040,10 +9040,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1257; - for (_iter1257 = this->tbl_names.begin(); _iter1257 != this->tbl_names.end(); ++_iter1257) + std::vector ::const_iterator _iter1301; + for (_iter1301 = this->tbl_names.begin(); _iter1301 != this->tbl_names.end(); ++_iter1301) { - xfer += oprot->writeString((*_iter1257)); + xfer += oprot->writeString((*_iter1301)); } xfer += oprot->writeListEnd(); } @@ -9071,10 +9071,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1258; - for (_iter1258 = (*(this->tbl_names)).begin(); _iter1258 != (*(this->tbl_names)).end(); ++_iter1258) + std::vector ::const_iterator _iter1302; + for (_iter1302 = (*(this->tbl_names)).begin(); _iter1302 != (*(this->tbl_names)).end(); ++_iter1302) { - xfer += oprot->writeString((*_iter1258)); + xfer += oprot->writeString((*_iter1302)); } xfer += oprot->writeListEnd(); } @@ -9115,17 +9115,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1259; - ::apache::thrift::protocol::TType _ktype1260; - ::apache::thrift::protocol::TType _vtype1261; - xfer += iprot->readMapBegin(_ktype1260, 
_vtype1261, _size1259); - uint32_t _i1263; - for (_i1263 = 0; _i1263 < _size1259; ++_i1263) + uint32_t _size1303; + ::apache::thrift::protocol::TType _ktype1304; + ::apache::thrift::protocol::TType _vtype1305; + xfer += iprot->readMapBegin(_ktype1304, _vtype1305, _size1303); + uint32_t _i1307; + for (_i1307 = 0; _i1307 < _size1303; ++_i1307) { - std::string _key1264; - xfer += iprot->readString(_key1264); - Materialization& _val1265 = this->success[_key1264]; - xfer += _val1265.read(iprot); + std::string _key1308; + xfer += iprot->readString(_key1308); + Materialization& _val1309 = this->success[_key1308]; + xfer += _val1309.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9180,11 +9180,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1266; - for (_iter1266 = this->success.begin(); _iter1266 != this->success.end(); ++_iter1266) + std::map ::const_iterator _iter1310; + for (_iter1310 = this->success.begin(); _iter1310 != this->success.end(); ++_iter1310) { - xfer += oprot->writeString(_iter1266->first); - xfer += _iter1266->second.write(oprot); + xfer += oprot->writeString(_iter1310->first); + xfer += _iter1310->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -9237,17 +9237,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1267; - ::apache::thrift::protocol::TType _ktype1268; - ::apache::thrift::protocol::TType _vtype1269; - xfer += iprot->readMapBegin(_ktype1268, _vtype1269, _size1267); - uint32_t _i1271; - for (_i1271 = 0; _i1271 < _size1267; ++_i1271) + uint32_t _size1311; + ::apache::thrift::protocol::TType _ktype1312; + ::apache::thrift::protocol::TType _vtype1313; + xfer += iprot->readMapBegin(_ktype1312, _vtype1313, _size1311); + uint32_t _i1315; + for (_i1315 = 0; _i1315 < _size1311; ++_i1315) { - std::string _key1272; - xfer += iprot->readString(_key1272); - Materialization& _val1273 = (*(this->success))[_key1272]; - xfer += _val1273.read(iprot); + std::string _key1316; + xfer += iprot->readString(_key1316); + Materialization& _val1317 = (*(this->success))[_key1316]; + xfer += _val1317.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9433,14 +9433,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1274; - ::apache::thrift::protocol::TType _etype1277; - xfer += iprot->readListBegin(_etype1277, _size1274); - this->success.resize(_size1274); - uint32_t _i1278; - for (_i1278 = 0; _i1278 < _size1274; ++_i1278) + uint32_t _size1318; + ::apache::thrift::protocol::TType _etype1321; + xfer += iprot->readListBegin(_etype1321, _size1318); + this->success.resize(_size1318); + uint32_t _i1322; + for (_i1322 = 0; _i1322 < _size1318; ++_i1322) { - xfer += iprot->readString(this->success[_i1278]); + xfer += iprot->readString(this->success[_i1322]); } xfer += iprot->readListEnd(); } @@ -9495,10 +9495,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter1279; - for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) + std::vector ::const_iterator _iter1323; + for (_iter1323 = this->success.begin(); _iter1323 != this->success.end(); ++_iter1323) { - xfer += oprot->writeString((*_iter1279)); + xfer += oprot->writeString((*_iter1323)); } xfer += oprot->writeListEnd(); } @@ -9551,14 +9551,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - (*(this->success)).resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1324; + ::apache::thrift::protocol::TType _etype1327; + xfer += iprot->readListBegin(_etype1327, _size1324); + (*(this->success)).resize(_size1324); + uint32_t _i1328; + for (_i1328 = 0; _i1328 < _size1324; ++_i1328) { - xfer += iprot->readString((*(this->success))[_i1284]); + xfer += iprot->readString((*(this->success))[_i1328]); } xfer += iprot->readListEnd(); } @@ -10892,14 +10892,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->new_parts.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1329; + ::apache::thrift::protocol::TType _etype1332; + xfer += iprot->readListBegin(_etype1332, _size1329); + this->new_parts.resize(_size1329); + uint32_t _i1333; + for (_i1333 = 0; _i1333 < _size1329; ++_i1333) { - xfer += this->new_parts[_i1289].read(iprot); + xfer += this->new_parts[_i1333].read(iprot); } xfer += iprot->readListEnd(); } @@ -10928,10 +10928,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1290; - for (_iter1290 = this->new_parts.begin(); _iter1290 != this->new_parts.end(); ++_iter1290) + std::vector ::const_iterator _iter1334; + for (_iter1334 = this->new_parts.begin(); _iter1334 != this->new_parts.end(); ++_iter1334) { - xfer += (*_iter1290).write(oprot); + xfer += (*_iter1334).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10955,10 +10955,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1291; - for (_iter1291 = (*(this->new_parts)).begin(); _iter1291 != (*(this->new_parts)).end(); ++_iter1291) + std::vector ::const_iterator _iter1335; + for (_iter1335 = (*(this->new_parts)).begin(); _iter1335 != (*(this->new_parts)).end(); ++_iter1335) { - xfer += (*_iter1291).write(oprot); + xfer += (*_iter1335).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11167,14 +11167,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->new_parts.clear(); - uint32_t _size1292; - ::apache::thrift::protocol::TType _etype1295; - xfer += iprot->readListBegin(_etype1295, _size1292); - this->new_parts.resize(_size1292); - uint32_t _i1296; - for (_i1296 = 0; _i1296 < _size1292; ++_i1296) + uint32_t _size1336; + ::apache::thrift::protocol::TType _etype1339; + xfer += iprot->readListBegin(_etype1339, _size1336); + this->new_parts.resize(_size1336); + uint32_t _i1340; + for (_i1340 = 0; _i1340 < _size1336; ++_i1340) { - xfer += this->new_parts[_i1296].read(iprot); + xfer += this->new_parts[_i1340].read(iprot); } xfer += iprot->readListEnd(); } @@ -11203,10 +11203,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1297; - for (_iter1297 = this->new_parts.begin(); _iter1297 != this->new_parts.end(); ++_iter1297) + std::vector ::const_iterator _iter1341; + for (_iter1341 = this->new_parts.begin(); _iter1341 != this->new_parts.end(); ++_iter1341) { - xfer += (*_iter1297).write(oprot); + xfer += (*_iter1341).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11230,10 +11230,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1298; - for (_iter1298 = (*(this->new_parts)).begin(); _iter1298 != (*(this->new_parts)).end(); ++_iter1298) + std::vector ::const_iterator _iter1342; + for (_iter1342 = (*(this->new_parts)).begin(); _iter1342 != (*(this->new_parts)).end(); ++_iter1342) { - xfer += (*_iter1298).write(oprot); + xfer += (*_iter1342).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11458,14 +11458,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1299; - ::apache::thrift::protocol::TType _etype1302; - xfer += iprot->readListBegin(_etype1302, _size1299); - this->part_vals.resize(_size1299); - uint32_t _i1303; - for (_i1303 = 0; _i1303 < _size1299; ++_i1303) + uint32_t _size1343; + ::apache::thrift::protocol::TType _etype1346; + xfer += iprot->readListBegin(_etype1346, _size1343); + this->part_vals.resize(_size1343); + uint32_t _i1347; + for (_i1347 = 0; _i1347 < _size1343; ++_i1347) { - xfer += iprot->readString(this->part_vals[_i1303]); + xfer += iprot->readString(this->part_vals[_i1347]); } xfer += iprot->readListEnd(); } @@ -11502,10 +11502,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1304; - for (_iter1304 = this->part_vals.begin(); _iter1304 != this->part_vals.end(); ++_iter1304) + std::vector ::const_iterator _iter1348; + for (_iter1348 = this->part_vals.begin(); _iter1348 != this->part_vals.end(); ++_iter1348) { - xfer += oprot->writeString((*_iter1304)); + xfer += oprot->writeString((*_iter1348)); } xfer += oprot->writeListEnd(); } @@ -11537,10 +11537,10 @@ uint32_t 
ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1305; - for (_iter1305 = (*(this->part_vals)).begin(); _iter1305 != (*(this->part_vals)).end(); ++_iter1305) + std::vector ::const_iterator _iter1349; + for (_iter1349 = (*(this->part_vals)).begin(); _iter1349 != (*(this->part_vals)).end(); ++_iter1349) { - xfer += oprot->writeString((*_iter1305)); + xfer += oprot->writeString((*_iter1349)); } xfer += oprot->writeListEnd(); } @@ -12012,14 +12012,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1306; - ::apache::thrift::protocol::TType _etype1309; - xfer += iprot->readListBegin(_etype1309, _size1306); - this->part_vals.resize(_size1306); - uint32_t _i1310; - for (_i1310 = 0; _i1310 < _size1306; ++_i1310) + uint32_t _size1350; + ::apache::thrift::protocol::TType _etype1353; + xfer += iprot->readListBegin(_etype1353, _size1350); + this->part_vals.resize(_size1350); + uint32_t _i1354; + for (_i1354 = 0; _i1354 < _size1350; ++_i1354) { - xfer += iprot->readString(this->part_vals[_i1310]); + xfer += iprot->readString(this->part_vals[_i1354]); } xfer += iprot->readListEnd(); } @@ -12064,10 +12064,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1311; - for (_iter1311 = this->part_vals.begin(); _iter1311 != this->part_vals.end(); ++_iter1311) + std::vector ::const_iterator _iter1355; + for (_iter1355 = this->part_vals.begin(); _iter1355 != this->part_vals.end(); ++_iter1355) { - xfer += oprot->writeString((*_iter1311)); + xfer += oprot->writeString((*_iter1355)); } xfer += oprot->writeListEnd(); } @@ -12103,10 +12103,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1312; - for (_iter1312 = (*(this->part_vals)).begin(); _iter1312 != (*(this->part_vals)).end(); ++_iter1312) + std::vector ::const_iterator _iter1356; + for (_iter1356 = (*(this->part_vals)).begin(); _iter1356 != (*(this->part_vals)).end(); ++_iter1356) { - xfer += oprot->writeString((*_iter1312)); + xfer += oprot->writeString((*_iter1356)); } xfer += oprot->writeListEnd(); } @@ -12909,14 +12909,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1313; - ::apache::thrift::protocol::TType _etype1316; - xfer += iprot->readListBegin(_etype1316, _size1313); - this->part_vals.resize(_size1313); - uint32_t _i1317; - for (_i1317 = 0; _i1317 < _size1313; ++_i1317) + uint32_t _size1357; + ::apache::thrift::protocol::TType _etype1360; + xfer += iprot->readListBegin(_etype1360, _size1357); + this->part_vals.resize(_size1357); + uint32_t _i1361; + for (_i1361 = 0; _i1361 < _size1357; ++_i1361) { - xfer += 
iprot->readString(this->part_vals[_i1317]); + xfer += iprot->readString(this->part_vals[_i1361]); } xfer += iprot->readListEnd(); } @@ -12961,10 +12961,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1318; - for (_iter1318 = this->part_vals.begin(); _iter1318 != this->part_vals.end(); ++_iter1318) + std::vector ::const_iterator _iter1362; + for (_iter1362 = this->part_vals.begin(); _iter1362 != this->part_vals.end(); ++_iter1362) { - xfer += oprot->writeString((*_iter1318)); + xfer += oprot->writeString((*_iter1362)); } xfer += oprot->writeListEnd(); } @@ -13000,10 +13000,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1319; - for (_iter1319 = (*(this->part_vals)).begin(); _iter1319 != (*(this->part_vals)).end(); ++_iter1319) + std::vector ::const_iterator _iter1363; + for (_iter1363 = (*(this->part_vals)).begin(); _iter1363 != (*(this->part_vals)).end(); ++_iter1363) { - xfer += oprot->writeString((*_iter1319)); + xfer += oprot->writeString((*_iter1363)); } xfer += oprot->writeListEnd(); } @@ -13212,14 +13212,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1320; - ::apache::thrift::protocol::TType _etype1323; - xfer += iprot->readListBegin(_etype1323, _size1320); - this->part_vals.resize(_size1320); - uint32_t _i1324; - for (_i1324 = 0; _i1324 < _size1320; ++_i1324) + uint32_t _size1364; + ::apache::thrift::protocol::TType _etype1367; + xfer += iprot->readListBegin(_etype1367, _size1364); + this->part_vals.resize(_size1364); + uint32_t _i1368; + for (_i1368 = 0; _i1368 < _size1364; ++_i1368) { - xfer += iprot->readString(this->part_vals[_i1324]); + xfer += iprot->readString(this->part_vals[_i1368]); } xfer += iprot->readListEnd(); } @@ -13272,10 +13272,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1325; - for (_iter1325 = this->part_vals.begin(); _iter1325 != this->part_vals.end(); ++_iter1325) + std::vector ::const_iterator _iter1369; + for (_iter1369 = this->part_vals.begin(); _iter1369 != this->part_vals.end(); ++_iter1369) { - xfer += oprot->writeString((*_iter1325)); + xfer += oprot->writeString((*_iter1369)); } xfer += oprot->writeListEnd(); } @@ -13315,10 +13315,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1326; - for (_iter1326 = (*(this->part_vals)).begin(); _iter1326 != (*(this->part_vals)).end(); ++_iter1326) + std::vector ::const_iterator _iter1370; + for (_iter1370 = 
(*(this->part_vals)).begin(); _iter1370 != (*(this->part_vals)).end(); ++_iter1370) { - xfer += oprot->writeString((*_iter1326)); + xfer += oprot->writeString((*_iter1370)); } xfer += oprot->writeListEnd(); } @@ -14324,14 +14324,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1327; - ::apache::thrift::protocol::TType _etype1330; - xfer += iprot->readListBegin(_etype1330, _size1327); - this->part_vals.resize(_size1327); - uint32_t _i1331; - for (_i1331 = 0; _i1331 < _size1327; ++_i1331) + uint32_t _size1371; + ::apache::thrift::protocol::TType _etype1374; + xfer += iprot->readListBegin(_etype1374, _size1371); + this->part_vals.resize(_size1371); + uint32_t _i1375; + for (_i1375 = 0; _i1375 < _size1371; ++_i1375) { - xfer += iprot->readString(this->part_vals[_i1331]); + xfer += iprot->readString(this->part_vals[_i1375]); } xfer += iprot->readListEnd(); } @@ -14368,10 +14368,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1332; - for (_iter1332 = this->part_vals.begin(); _iter1332 != this->part_vals.end(); ++_iter1332) + std::vector ::const_iterator _iter1376; + for (_iter1376 = this->part_vals.begin(); _iter1376 != this->part_vals.end(); ++_iter1376) { - xfer += oprot->writeString((*_iter1332)); + xfer += oprot->writeString((*_iter1376)); } xfer += oprot->writeListEnd(); } @@ -14403,10 +14403,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1333; - for (_iter1333 = (*(this->part_vals)).begin(); _iter1333 != (*(this->part_vals)).end(); ++_iter1333) + std::vector ::const_iterator _iter1377; + for (_iter1377 = (*(this->part_vals)).begin(); _iter1377 != (*(this->part_vals)).end(); ++_iter1377) { - xfer += oprot->writeString((*_iter1333)); + xfer += oprot->writeString((*_iter1377)); } xfer += oprot->writeListEnd(); } @@ -14595,17 +14595,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1334; - ::apache::thrift::protocol::TType _ktype1335; - ::apache::thrift::protocol::TType _vtype1336; - xfer += iprot->readMapBegin(_ktype1335, _vtype1336, _size1334); - uint32_t _i1338; - for (_i1338 = 0; _i1338 < _size1334; ++_i1338) + uint32_t _size1378; + ::apache::thrift::protocol::TType _ktype1379; + ::apache::thrift::protocol::TType _vtype1380; + xfer += iprot->readMapBegin(_ktype1379, _vtype1380, _size1378); + uint32_t _i1382; + for (_i1382 = 0; _i1382 < _size1378; ++_i1382) { - std::string _key1339; - xfer += iprot->readString(_key1339); - std::string& _val1340 = this->partitionSpecs[_key1339]; - xfer += iprot->readString(_val1340); + std::string _key1383; + xfer += iprot->readString(_key1383); + std::string& _val1384 = this->partitionSpecs[_key1383]; + xfer += iprot->readString(_val1384); } xfer += iprot->readMapEnd(); } @@ -14666,11 +14666,11 @@ uint32_t 
ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1341; - for (_iter1341 = this->partitionSpecs.begin(); _iter1341 != this->partitionSpecs.end(); ++_iter1341) + std::map ::const_iterator _iter1385; + for (_iter1385 = this->partitionSpecs.begin(); _iter1385 != this->partitionSpecs.end(); ++_iter1385) { - xfer += oprot->writeString(_iter1341->first); - xfer += oprot->writeString(_iter1341->second); + xfer += oprot->writeString(_iter1385->first); + xfer += oprot->writeString(_iter1385->second); } xfer += oprot->writeMapEnd(); } @@ -14710,11 +14710,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1342; - for (_iter1342 = (*(this->partitionSpecs)).begin(); _iter1342 != (*(this->partitionSpecs)).end(); ++_iter1342) + std::map ::const_iterator _iter1386; + for (_iter1386 = (*(this->partitionSpecs)).begin(); _iter1386 != (*(this->partitionSpecs)).end(); ++_iter1386) { - xfer += oprot->writeString(_iter1342->first); - xfer += oprot->writeString(_iter1342->second); + xfer += oprot->writeString(_iter1386->first); + xfer += oprot->writeString(_iter1386->second); } xfer += oprot->writeMapEnd(); } @@ -14959,17 +14959,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1343; - ::apache::thrift::protocol::TType _ktype1344; - ::apache::thrift::protocol::TType _vtype1345; - xfer += iprot->readMapBegin(_ktype1344, _vtype1345, _size1343); - uint32_t _i1347; - for (_i1347 = 0; _i1347 < _size1343; ++_i1347) + uint32_t _size1387; + ::apache::thrift::protocol::TType _ktype1388; + ::apache::thrift::protocol::TType _vtype1389; + xfer += iprot->readMapBegin(_ktype1388, _vtype1389, _size1387); + uint32_t _i1391; + for (_i1391 = 0; _i1391 < _size1387; ++_i1391) { - std::string _key1348; - xfer += iprot->readString(_key1348); - std::string& _val1349 = this->partitionSpecs[_key1348]; - xfer += iprot->readString(_val1349); + std::string _key1392; + xfer += iprot->readString(_key1392); + std::string& _val1393 = this->partitionSpecs[_key1392]; + xfer += iprot->readString(_val1393); } xfer += iprot->readMapEnd(); } @@ -15030,11 +15030,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1350; - for (_iter1350 = this->partitionSpecs.begin(); _iter1350 != this->partitionSpecs.end(); ++_iter1350) + std::map ::const_iterator _iter1394; + for (_iter1394 = this->partitionSpecs.begin(); _iter1394 != this->partitionSpecs.end(); ++_iter1394) { - xfer += oprot->writeString(_iter1350->first); - xfer += oprot->writeString(_iter1350->second); + xfer += oprot->writeString(_iter1394->first); + xfer += 
oprot->writeString(_iter1394->second); } xfer += oprot->writeMapEnd(); } @@ -15074,11 +15074,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1351; - for (_iter1351 = (*(this->partitionSpecs)).begin(); _iter1351 != (*(this->partitionSpecs)).end(); ++_iter1351) + std::map ::const_iterator _iter1395; + for (_iter1395 = (*(this->partitionSpecs)).begin(); _iter1395 != (*(this->partitionSpecs)).end(); ++_iter1395) { - xfer += oprot->writeString(_iter1351->first); - xfer += oprot->writeString(_iter1351->second); + xfer += oprot->writeString(_iter1395->first); + xfer += oprot->writeString(_iter1395->second); } xfer += oprot->writeMapEnd(); } @@ -15135,14 +15135,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1352; - ::apache::thrift::protocol::TType _etype1355; - xfer += iprot->readListBegin(_etype1355, _size1352); - this->success.resize(_size1352); - uint32_t _i1356; - for (_i1356 = 0; _i1356 < _size1352; ++_i1356) + uint32_t _size1396; + ::apache::thrift::protocol::TType _etype1399; + xfer += iprot->readListBegin(_etype1399, _size1396); + this->success.resize(_size1396); + uint32_t _i1400; + for (_i1400 = 0; _i1400 < _size1396; ++_i1400) { - xfer += this->success[_i1356].read(iprot); + xfer += this->success[_i1400].read(iprot); } xfer += iprot->readListEnd(); } @@ -15205,10 +15205,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1357; - for (_iter1357 = this->success.begin(); _iter1357 != this->success.end(); ++_iter1357) + std::vector ::const_iterator _iter1401; + for (_iter1401 = this->success.begin(); _iter1401 != this->success.end(); ++_iter1401) { - xfer += (*_iter1357).write(oprot); + xfer += (*_iter1401).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15265,14 +15265,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1358; - ::apache::thrift::protocol::TType _etype1361; - xfer += iprot->readListBegin(_etype1361, _size1358); - (*(this->success)).resize(_size1358); - uint32_t _i1362; - for (_i1362 = 0; _i1362 < _size1358; ++_i1362) + uint32_t _size1402; + ::apache::thrift::protocol::TType _etype1405; + xfer += iprot->readListBegin(_etype1405, _size1402); + (*(this->success)).resize(_size1402); + uint32_t _i1406; + for (_i1406 = 0; _i1406 < _size1402; ++_i1406) { - xfer += (*(this->success))[_i1362].read(iprot); + xfer += (*(this->success))[_i1406].read(iprot); } xfer += iprot->readListEnd(); } @@ -15371,14 +15371,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1363; - ::apache::thrift::protocol::TType _etype1366; - xfer += iprot->readListBegin(_etype1366, _size1363); - this->part_vals.resize(_size1363); - uint32_t _i1367; - for 
(_i1367 = 0; _i1367 < _size1363; ++_i1367) + uint32_t _size1407; + ::apache::thrift::protocol::TType _etype1410; + xfer += iprot->readListBegin(_etype1410, _size1407); + this->part_vals.resize(_size1407); + uint32_t _i1411; + for (_i1411 = 0; _i1411 < _size1407; ++_i1411) { - xfer += iprot->readString(this->part_vals[_i1367]); + xfer += iprot->readString(this->part_vals[_i1411]); } xfer += iprot->readListEnd(); } @@ -15399,14 +15399,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1368; - ::apache::thrift::protocol::TType _etype1371; - xfer += iprot->readListBegin(_etype1371, _size1368); - this->group_names.resize(_size1368); - uint32_t _i1372; - for (_i1372 = 0; _i1372 < _size1368; ++_i1372) + uint32_t _size1412; + ::apache::thrift::protocol::TType _etype1415; + xfer += iprot->readListBegin(_etype1415, _size1412); + this->group_names.resize(_size1412); + uint32_t _i1416; + for (_i1416 = 0; _i1416 < _size1412; ++_i1416) { - xfer += iprot->readString(this->group_names[_i1372]); + xfer += iprot->readString(this->group_names[_i1416]); } xfer += iprot->readListEnd(); } @@ -15443,10 +15443,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1373; - for (_iter1373 = this->part_vals.begin(); _iter1373 != this->part_vals.end(); ++_iter1373) + std::vector ::const_iterator _iter1417; + for (_iter1417 = this->part_vals.begin(); _iter1417 != this->part_vals.end(); ++_iter1417) { - xfer += oprot->writeString((*_iter1373)); + xfer += oprot->writeString((*_iter1417)); } xfer += oprot->writeListEnd(); } @@ -15459,10 +15459,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1374; - for (_iter1374 = this->group_names.begin(); _iter1374 != this->group_names.end(); ++_iter1374) + std::vector ::const_iterator _iter1418; + for (_iter1418 = this->group_names.begin(); _iter1418 != this->group_names.end(); ++_iter1418) { - xfer += oprot->writeString((*_iter1374)); + xfer += oprot->writeString((*_iter1418)); } xfer += oprot->writeListEnd(); } @@ -15494,10 +15494,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1375; - for (_iter1375 = (*(this->part_vals)).begin(); _iter1375 != (*(this->part_vals)).end(); ++_iter1375) + std::vector ::const_iterator _iter1419; + for (_iter1419 = (*(this->part_vals)).begin(); _iter1419 != (*(this->part_vals)).end(); ++_iter1419) { - xfer += oprot->writeString((*_iter1375)); + xfer += oprot->writeString((*_iter1419)); } xfer += oprot->writeListEnd(); } @@ -15510,10 +15510,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1376; - for (_iter1376 = (*(this->group_names)).begin(); _iter1376 != (*(this->group_names)).end(); ++_iter1376) + std::vector ::const_iterator _iter1420; + for (_iter1420 = (*(this->group_names)).begin(); _iter1420 != (*(this->group_names)).end(); ++_iter1420) { - xfer += oprot->writeString((*_iter1376)); + xfer += oprot->writeString((*_iter1420)); } xfer += oprot->writeListEnd(); } @@ -16072,14 +16072,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1377; - ::apache::thrift::protocol::TType _etype1380; - xfer += iprot->readListBegin(_etype1380, _size1377); - this->success.resize(_size1377); - uint32_t _i1381; - for (_i1381 = 0; _i1381 < _size1377; ++_i1381) + uint32_t _size1421; + ::apache::thrift::protocol::TType _etype1424; + xfer += iprot->readListBegin(_etype1424, _size1421); + this->success.resize(_size1421); + uint32_t _i1425; + for (_i1425 = 0; _i1425 < _size1421; ++_i1425) { - xfer += this->success[_i1381].read(iprot); + xfer += this->success[_i1425].read(iprot); } xfer += iprot->readListEnd(); } @@ -16126,10 +16126,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1382; - for (_iter1382 = this->success.begin(); _iter1382 != this->success.end(); ++_iter1382) + std::vector ::const_iterator _iter1426; + for (_iter1426 = this->success.begin(); _iter1426 != this->success.end(); ++_iter1426) { - xfer += (*_iter1382).write(oprot); + xfer += (*_iter1426).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16178,14 +16178,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1383; - ::apache::thrift::protocol::TType _etype1386; - xfer += iprot->readListBegin(_etype1386, _size1383); - (*(this->success)).resize(_size1383); - uint32_t _i1387; - for (_i1387 = 0; _i1387 < _size1383; ++_i1387) + uint32_t _size1427; + ::apache::thrift::protocol::TType _etype1430; + xfer += iprot->readListBegin(_etype1430, _size1427); + (*(this->success)).resize(_size1427); + uint32_t _i1431; + for (_i1431 = 0; _i1431 < _size1427; ++_i1431) { - xfer += (*(this->success))[_i1387].read(iprot); + xfer += (*(this->success))[_i1431].read(iprot); } xfer += iprot->readListEnd(); } @@ -16284,14 +16284,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1388; - ::apache::thrift::protocol::TType _etype1391; - xfer += iprot->readListBegin(_etype1391, _size1388); - this->group_names.resize(_size1388); - uint32_t _i1392; - for (_i1392 = 0; _i1392 < _size1388; ++_i1392) + uint32_t _size1432; + ::apache::thrift::protocol::TType _etype1435; + xfer += iprot->readListBegin(_etype1435, _size1432); + this->group_names.resize(_size1432); + uint32_t _i1436; + for (_i1436 = 0; _i1436 < _size1432; ++_i1436) { - xfer += iprot->readString(this->group_names[_i1392]); + xfer += iprot->readString(this->group_names[_i1436]); } xfer += iprot->readListEnd(); } @@ 
-16336,10 +16336,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1393; - for (_iter1393 = this->group_names.begin(); _iter1393 != this->group_names.end(); ++_iter1393) + std::vector ::const_iterator _iter1437; + for (_iter1437 = this->group_names.begin(); _iter1437 != this->group_names.end(); ++_iter1437) { - xfer += oprot->writeString((*_iter1393)); + xfer += oprot->writeString((*_iter1437)); } xfer += oprot->writeListEnd(); } @@ -16379,10 +16379,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1394; - for (_iter1394 = (*(this->group_names)).begin(); _iter1394 != (*(this->group_names)).end(); ++_iter1394) + std::vector ::const_iterator _iter1438; + for (_iter1438 = (*(this->group_names)).begin(); _iter1438 != (*(this->group_names)).end(); ++_iter1438) { - xfer += oprot->writeString((*_iter1394)); + xfer += oprot->writeString((*_iter1438)); } xfer += oprot->writeListEnd(); } @@ -16423,14 +16423,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1395; - ::apache::thrift::protocol::TType _etype1398; - xfer += iprot->readListBegin(_etype1398, _size1395); - this->success.resize(_size1395); - uint32_t _i1399; - for (_i1399 = 0; _i1399 < _size1395; ++_i1399) + uint32_t _size1439; + ::apache::thrift::protocol::TType _etype1442; + xfer += iprot->readListBegin(_etype1442, _size1439); + this->success.resize(_size1439); + uint32_t _i1443; + for (_i1443 = 0; _i1443 < _size1439; ++_i1443) { - xfer += this->success[_i1399].read(iprot); + xfer += this->success[_i1443].read(iprot); } xfer += iprot->readListEnd(); } @@ -16477,10 +16477,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1400; - for (_iter1400 = this->success.begin(); _iter1400 != this->success.end(); ++_iter1400) + std::vector ::const_iterator _iter1444; + for (_iter1444 = this->success.begin(); _iter1444 != this->success.end(); ++_iter1444) { - xfer += (*_iter1400).write(oprot); + xfer += (*_iter1444).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16529,14 +16529,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1401; - ::apache::thrift::protocol::TType _etype1404; - xfer += iprot->readListBegin(_etype1404, _size1401); - (*(this->success)).resize(_size1401); - uint32_t _i1405; - for (_i1405 = 0; _i1405 < _size1401; ++_i1405) + uint32_t _size1445; + ::apache::thrift::protocol::TType _etype1448; + xfer += iprot->readListBegin(_etype1448, _size1445); + (*(this->success)).resize(_size1445); + uint32_t _i1449; + for (_i1449 = 0; _i1449 < _size1445; ++_i1449) { - xfer += 
(*(this->success))[_i1405].read(iprot); + xfer += (*(this->success))[_i1449].read(iprot); } xfer += iprot->readListEnd(); } @@ -16714,14 +16714,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1406; - ::apache::thrift::protocol::TType _etype1409; - xfer += iprot->readListBegin(_etype1409, _size1406); - this->success.resize(_size1406); - uint32_t _i1410; - for (_i1410 = 0; _i1410 < _size1406; ++_i1410) + uint32_t _size1450; + ::apache::thrift::protocol::TType _etype1453; + xfer += iprot->readListBegin(_etype1453, _size1450); + this->success.resize(_size1450); + uint32_t _i1454; + for (_i1454 = 0; _i1454 < _size1450; ++_i1454) { - xfer += this->success[_i1410].read(iprot); + xfer += this->success[_i1454].read(iprot); } xfer += iprot->readListEnd(); } @@ -16768,10 +16768,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1411; - for (_iter1411 = this->success.begin(); _iter1411 != this->success.end(); ++_iter1411) + std::vector ::const_iterator _iter1455; + for (_iter1455 = this->success.begin(); _iter1455 != this->success.end(); ++_iter1455) { - xfer += (*_iter1411).write(oprot); + xfer += (*_iter1455).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16820,14 +16820,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1412; - ::apache::thrift::protocol::TType _etype1415; - xfer += iprot->readListBegin(_etype1415, _size1412); - (*(this->success)).resize(_size1412); - uint32_t _i1416; - for (_i1416 = 0; _i1416 < _size1412; ++_i1416) + uint32_t _size1456; + ::apache::thrift::protocol::TType _etype1459; + xfer += iprot->readListBegin(_etype1459, _size1456); + (*(this->success)).resize(_size1456); + uint32_t _i1460; + for (_i1460 = 0; _i1460 < _size1456; ++_i1460) { - xfer += (*(this->success))[_i1416].read(iprot); + xfer += (*(this->success))[_i1460].read(iprot); } xfer += iprot->readListEnd(); } @@ -17005,14 +17005,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1417; - ::apache::thrift::protocol::TType _etype1420; - xfer += iprot->readListBegin(_etype1420, _size1417); - this->success.resize(_size1417); - uint32_t _i1421; - for (_i1421 = 0; _i1421 < _size1417; ++_i1421) + uint32_t _size1461; + ::apache::thrift::protocol::TType _etype1464; + xfer += iprot->readListBegin(_etype1464, _size1461); + this->success.resize(_size1461); + uint32_t _i1465; + for (_i1465 = 0; _i1465 < _size1461; ++_i1465) { - xfer += iprot->readString(this->success[_i1421]); + xfer += iprot->readString(this->success[_i1465]); } xfer += iprot->readListEnd(); } @@ -17059,10 +17059,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1422; - for (_iter1422 = this->success.begin(); _iter1422 != this->success.end(); ++_iter1422) + 
std::vector ::const_iterator _iter1466; + for (_iter1466 = this->success.begin(); _iter1466 != this->success.end(); ++_iter1466) { - xfer += oprot->writeString((*_iter1422)); + xfer += oprot->writeString((*_iter1466)); } xfer += oprot->writeListEnd(); } @@ -17111,14 +17111,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1423; - ::apache::thrift::protocol::TType _etype1426; - xfer += iprot->readListBegin(_etype1426, _size1423); - (*(this->success)).resize(_size1423); - uint32_t _i1427; - for (_i1427 = 0; _i1427 < _size1423; ++_i1427) + uint32_t _size1467; + ::apache::thrift::protocol::TType _etype1470; + xfer += iprot->readListBegin(_etype1470, _size1467); + (*(this->success)).resize(_size1467); + uint32_t _i1471; + for (_i1471 = 0; _i1471 < _size1467; ++_i1471) { - xfer += iprot->readString((*(this->success))[_i1427]); + xfer += iprot->readString((*(this->success))[_i1471]); } xfer += iprot->readListEnd(); } @@ -17428,14 +17428,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1428; - ::apache::thrift::protocol::TType _etype1431; - xfer += iprot->readListBegin(_etype1431, _size1428); - this->part_vals.resize(_size1428); - uint32_t _i1432; - for (_i1432 = 0; _i1432 < _size1428; ++_i1432) + uint32_t _size1472; + ::apache::thrift::protocol::TType _etype1475; + xfer += iprot->readListBegin(_etype1475, _size1472); + this->part_vals.resize(_size1472); + uint32_t _i1476; + for (_i1476 = 0; _i1476 < _size1472; ++_i1476) { - xfer += iprot->readString(this->part_vals[_i1432]); + xfer += iprot->readString(this->part_vals[_i1476]); } xfer += iprot->readListEnd(); } @@ -17480,10 +17480,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1433; - for (_iter1433 = this->part_vals.begin(); _iter1433 != this->part_vals.end(); ++_iter1433) + std::vector ::const_iterator _iter1477; + for (_iter1477 = this->part_vals.begin(); _iter1477 != this->part_vals.end(); ++_iter1477) { - xfer += oprot->writeString((*_iter1433)); + xfer += oprot->writeString((*_iter1477)); } xfer += oprot->writeListEnd(); } @@ -17519,10 +17519,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1434; - for (_iter1434 = (*(this->part_vals)).begin(); _iter1434 != (*(this->part_vals)).end(); ++_iter1434) + std::vector ::const_iterator _iter1478; + for (_iter1478 = (*(this->part_vals)).begin(); _iter1478 != (*(this->part_vals)).end(); ++_iter1478) { - xfer += oprot->writeString((*_iter1434)); + xfer += oprot->writeString((*_iter1478)); } xfer += oprot->writeListEnd(); } @@ -17567,14 +17567,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1435; - ::apache::thrift::protocol::TType _etype1438; - xfer += 
iprot->readListBegin(_etype1438, _size1435); - this->success.resize(_size1435); - uint32_t _i1439; - for (_i1439 = 0; _i1439 < _size1435; ++_i1439) + uint32_t _size1479; + ::apache::thrift::protocol::TType _etype1482; + xfer += iprot->readListBegin(_etype1482, _size1479); + this->success.resize(_size1479); + uint32_t _i1483; + for (_i1483 = 0; _i1483 < _size1479; ++_i1483) { - xfer += this->success[_i1439].read(iprot); + xfer += this->success[_i1483].read(iprot); } xfer += iprot->readListEnd(); } @@ -17621,10 +17621,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1440; - for (_iter1440 = this->success.begin(); _iter1440 != this->success.end(); ++_iter1440) + std::vector ::const_iterator _iter1484; + for (_iter1484 = this->success.begin(); _iter1484 != this->success.end(); ++_iter1484) { - xfer += (*_iter1440).write(oprot); + xfer += (*_iter1484).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17673,14 +17673,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1441; - ::apache::thrift::protocol::TType _etype1444; - xfer += iprot->readListBegin(_etype1444, _size1441); - (*(this->success)).resize(_size1441); - uint32_t _i1445; - for (_i1445 = 0; _i1445 < _size1441; ++_i1445) + uint32_t _size1485; + ::apache::thrift::protocol::TType _etype1488; + xfer += iprot->readListBegin(_etype1488, _size1485); + (*(this->success)).resize(_size1485); + uint32_t _i1489; + for (_i1489 = 0; _i1489 < _size1485; ++_i1489) { - xfer += (*(this->success))[_i1445].read(iprot); + xfer += (*(this->success))[_i1489].read(iprot); } xfer += iprot->readListEnd(); } @@ -17763,14 +17763,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1446; - ::apache::thrift::protocol::TType _etype1449; - xfer += iprot->readListBegin(_etype1449, _size1446); - this->part_vals.resize(_size1446); - uint32_t _i1450; - for (_i1450 = 0; _i1450 < _size1446; ++_i1450) + uint32_t _size1490; + ::apache::thrift::protocol::TType _etype1493; + xfer += iprot->readListBegin(_etype1493, _size1490); + this->part_vals.resize(_size1490); + uint32_t _i1494; + for (_i1494 = 0; _i1494 < _size1490; ++_i1494) { - xfer += iprot->readString(this->part_vals[_i1450]); + xfer += iprot->readString(this->part_vals[_i1494]); } xfer += iprot->readListEnd(); } @@ -17799,14 +17799,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1451; - ::apache::thrift::protocol::TType _etype1454; - xfer += iprot->readListBegin(_etype1454, _size1451); - this->group_names.resize(_size1451); - uint32_t _i1455; - for (_i1455 = 0; _i1455 < _size1451; ++_i1455) + uint32_t _size1495; + ::apache::thrift::protocol::TType _etype1498; + xfer += iprot->readListBegin(_etype1498, _size1495); + this->group_names.resize(_size1495); + uint32_t _i1499; + for (_i1499 = 0; _i1499 < _size1495; ++_i1499) { - xfer += iprot->readString(this->group_names[_i1455]); + xfer += iprot->readString(this->group_names[_i1499]); } xfer += iprot->readListEnd(); } @@ 
-17843,10 +17843,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1456; - for (_iter1456 = this->part_vals.begin(); _iter1456 != this->part_vals.end(); ++_iter1456) + std::vector ::const_iterator _iter1500; + for (_iter1500 = this->part_vals.begin(); _iter1500 != this->part_vals.end(); ++_iter1500) { - xfer += oprot->writeString((*_iter1456)); + xfer += oprot->writeString((*_iter1500)); } xfer += oprot->writeListEnd(); } @@ -17863,10 +17863,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1457; - for (_iter1457 = this->group_names.begin(); _iter1457 != this->group_names.end(); ++_iter1457) + std::vector ::const_iterator _iter1501; + for (_iter1501 = this->group_names.begin(); _iter1501 != this->group_names.end(); ++_iter1501) { - xfer += oprot->writeString((*_iter1457)); + xfer += oprot->writeString((*_iter1501)); } xfer += oprot->writeListEnd(); } @@ -17898,10 +17898,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1458; - for (_iter1458 = (*(this->part_vals)).begin(); _iter1458 != (*(this->part_vals)).end(); ++_iter1458) + std::vector ::const_iterator _iter1502; + for (_iter1502 = (*(this->part_vals)).begin(); _iter1502 != (*(this->part_vals)).end(); ++_iter1502) { - xfer += oprot->writeString((*_iter1458)); + xfer += oprot->writeString((*_iter1502)); } xfer += oprot->writeListEnd(); } @@ -17918,10 +17918,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1459; - for (_iter1459 = (*(this->group_names)).begin(); _iter1459 != (*(this->group_names)).end(); ++_iter1459) + std::vector ::const_iterator _iter1503; + for (_iter1503 = (*(this->group_names)).begin(); _iter1503 != (*(this->group_names)).end(); ++_iter1503) { - xfer += oprot->writeString((*_iter1459)); + xfer += oprot->writeString((*_iter1503)); } xfer += oprot->writeListEnd(); } @@ -17962,14 +17962,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1460; - ::apache::thrift::protocol::TType _etype1463; - xfer += iprot->readListBegin(_etype1463, _size1460); - this->success.resize(_size1460); - uint32_t _i1464; - for (_i1464 = 0; _i1464 < _size1460; ++_i1464) + uint32_t _size1504; + ::apache::thrift::protocol::TType _etype1507; + xfer += iprot->readListBegin(_etype1507, _size1504); + this->success.resize(_size1504); + uint32_t _i1508; + for (_i1508 = 0; _i1508 < _size1504; ++_i1508) { - xfer += this->success[_i1464].read(iprot); + xfer += 
this->success[_i1508].read(iprot); } xfer += iprot->readListEnd(); } @@ -18016,10 +18016,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1465; - for (_iter1465 = this->success.begin(); _iter1465 != this->success.end(); ++_iter1465) + std::vector ::const_iterator _iter1509; + for (_iter1509 = this->success.begin(); _iter1509 != this->success.end(); ++_iter1509) { - xfer += (*_iter1465).write(oprot); + xfer += (*_iter1509).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18068,14 +18068,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1466; - ::apache::thrift::protocol::TType _etype1469; - xfer += iprot->readListBegin(_etype1469, _size1466); - (*(this->success)).resize(_size1466); - uint32_t _i1470; - for (_i1470 = 0; _i1470 < _size1466; ++_i1470) + uint32_t _size1510; + ::apache::thrift::protocol::TType _etype1513; + xfer += iprot->readListBegin(_etype1513, _size1510); + (*(this->success)).resize(_size1510); + uint32_t _i1514; + for (_i1514 = 0; _i1514 < _size1510; ++_i1514) { - xfer += (*(this->success))[_i1470].read(iprot); + xfer += (*(this->success))[_i1514].read(iprot); } xfer += iprot->readListEnd(); } @@ -18158,14 +18158,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1471; - ::apache::thrift::protocol::TType _etype1474; - xfer += iprot->readListBegin(_etype1474, _size1471); - this->part_vals.resize(_size1471); - uint32_t _i1475; - for (_i1475 = 0; _i1475 < _size1471; ++_i1475) + uint32_t _size1515; + ::apache::thrift::protocol::TType _etype1518; + xfer += iprot->readListBegin(_etype1518, _size1515); + this->part_vals.resize(_size1515); + uint32_t _i1519; + for (_i1519 = 0; _i1519 < _size1515; ++_i1519) { - xfer += iprot->readString(this->part_vals[_i1475]); + xfer += iprot->readString(this->part_vals[_i1519]); } xfer += iprot->readListEnd(); } @@ -18210,10 +18210,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1476; - for (_iter1476 = this->part_vals.begin(); _iter1476 != this->part_vals.end(); ++_iter1476) + std::vector ::const_iterator _iter1520; + for (_iter1520 = this->part_vals.begin(); _iter1520 != this->part_vals.end(); ++_iter1520) { - xfer += oprot->writeString((*_iter1476)); + xfer += oprot->writeString((*_iter1520)); } xfer += oprot->writeListEnd(); } @@ -18249,10 +18249,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1477; - for (_iter1477 = (*(this->part_vals)).begin(); _iter1477 != (*(this->part_vals)).end(); ++_iter1477) + std::vector ::const_iterator _iter1521; + for (_iter1521 = (*(this->part_vals)).begin(); 
_iter1521 != (*(this->part_vals)).end(); ++_iter1521) { - xfer += oprot->writeString((*_iter1477)); + xfer += oprot->writeString((*_iter1521)); } xfer += oprot->writeListEnd(); } @@ -18297,14 +18297,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1478; - ::apache::thrift::protocol::TType _etype1481; - xfer += iprot->readListBegin(_etype1481, _size1478); - this->success.resize(_size1478); - uint32_t _i1482; - for (_i1482 = 0; _i1482 < _size1478; ++_i1482) + uint32_t _size1522; + ::apache::thrift::protocol::TType _etype1525; + xfer += iprot->readListBegin(_etype1525, _size1522); + this->success.resize(_size1522); + uint32_t _i1526; + for (_i1526 = 0; _i1526 < _size1522; ++_i1526) { - xfer += iprot->readString(this->success[_i1482]); + xfer += iprot->readString(this->success[_i1526]); } xfer += iprot->readListEnd(); } @@ -18351,10 +18351,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1483; - for (_iter1483 = this->success.begin(); _iter1483 != this->success.end(); ++_iter1483) + std::vector ::const_iterator _iter1527; + for (_iter1527 = this->success.begin(); _iter1527 != this->success.end(); ++_iter1527) { - xfer += oprot->writeString((*_iter1483)); + xfer += oprot->writeString((*_iter1527)); } xfer += oprot->writeListEnd(); } @@ -18403,14 +18403,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1484; - ::apache::thrift::protocol::TType _etype1487; - xfer += iprot->readListBegin(_etype1487, _size1484); - (*(this->success)).resize(_size1484); - uint32_t _i1488; - for (_i1488 = 0; _i1488 < _size1484; ++_i1488) + uint32_t _size1528; + ::apache::thrift::protocol::TType _etype1531; + xfer += iprot->readListBegin(_etype1531, _size1528); + (*(this->success)).resize(_size1528); + uint32_t _i1532; + for (_i1532 = 0; _i1532 < _size1528; ++_i1532) { - xfer += iprot->readString((*(this->success))[_i1488]); + xfer += iprot->readString((*(this->success))[_i1532]); } xfer += iprot->readListEnd(); } @@ -18604,14 +18604,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1489; - ::apache::thrift::protocol::TType _etype1492; - xfer += iprot->readListBegin(_etype1492, _size1489); - this->success.resize(_size1489); - uint32_t _i1493; - for (_i1493 = 0; _i1493 < _size1489; ++_i1493) + uint32_t _size1533; + ::apache::thrift::protocol::TType _etype1536; + xfer += iprot->readListBegin(_etype1536, _size1533); + this->success.resize(_size1533); + uint32_t _i1537; + for (_i1537 = 0; _i1537 < _size1533; ++_i1537) { - xfer += this->success[_i1493].read(iprot); + xfer += this->success[_i1537].read(iprot); } xfer += iprot->readListEnd(); } @@ -18658,10 +18658,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1494; - for 
(_iter1494 = this->success.begin(); _iter1494 != this->success.end(); ++_iter1494) + std::vector ::const_iterator _iter1538; + for (_iter1538 = this->success.begin(); _iter1538 != this->success.end(); ++_iter1538) { - xfer += (*_iter1494).write(oprot); + xfer += (*_iter1538).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18710,14 +18710,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1495; - ::apache::thrift::protocol::TType _etype1498; - xfer += iprot->readListBegin(_etype1498, _size1495); - (*(this->success)).resize(_size1495); - uint32_t _i1499; - for (_i1499 = 0; _i1499 < _size1495; ++_i1499) + uint32_t _size1539; + ::apache::thrift::protocol::TType _etype1542; + xfer += iprot->readListBegin(_etype1542, _size1539); + (*(this->success)).resize(_size1539); + uint32_t _i1543; + for (_i1543 = 0; _i1543 < _size1539; ++_i1543) { - xfer += (*(this->success))[_i1499].read(iprot); + xfer += (*(this->success))[_i1543].read(iprot); } xfer += iprot->readListEnd(); } @@ -18911,14 +18911,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1500; - ::apache::thrift::protocol::TType _etype1503; - xfer += iprot->readListBegin(_etype1503, _size1500); - this->success.resize(_size1500); - uint32_t _i1504; - for (_i1504 = 0; _i1504 < _size1500; ++_i1504) + uint32_t _size1544; + ::apache::thrift::protocol::TType _etype1547; + xfer += iprot->readListBegin(_etype1547, _size1544); + this->success.resize(_size1544); + uint32_t _i1548; + for (_i1548 = 0; _i1548 < _size1544; ++_i1548) { - xfer += this->success[_i1504].read(iprot); + xfer += this->success[_i1548].read(iprot); } xfer += iprot->readListEnd(); } @@ -18965,10 +18965,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1505; - for (_iter1505 = this->success.begin(); _iter1505 != this->success.end(); ++_iter1505) + std::vector ::const_iterator _iter1549; + for (_iter1549 = this->success.begin(); _iter1549 != this->success.end(); ++_iter1549) { - xfer += (*_iter1505).write(oprot); + xfer += (*_iter1549).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19017,14 +19017,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1506; - ::apache::thrift::protocol::TType _etype1509; - xfer += iprot->readListBegin(_etype1509, _size1506); - (*(this->success)).resize(_size1506); - uint32_t _i1510; - for (_i1510 = 0; _i1510 < _size1506; ++_i1510) + uint32_t _size1550; + ::apache::thrift::protocol::TType _etype1553; + xfer += iprot->readListBegin(_etype1553, _size1550); + (*(this->success)).resize(_size1550); + uint32_t _i1554; + for (_i1554 = 0; _i1554 < _size1550; ++_i1554) { - xfer += (*(this->success))[_i1510].read(iprot); + xfer += (*(this->success))[_i1554].read(iprot); } xfer += iprot->readListEnd(); } @@ -19593,14 +19593,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1511; - 
::apache::thrift::protocol::TType _etype1514; - xfer += iprot->readListBegin(_etype1514, _size1511); - this->names.resize(_size1511); - uint32_t _i1515; - for (_i1515 = 0; _i1515 < _size1511; ++_i1515) + uint32_t _size1555; + ::apache::thrift::protocol::TType _etype1558; + xfer += iprot->readListBegin(_etype1558, _size1555); + this->names.resize(_size1555); + uint32_t _i1559; + for (_i1559 = 0; _i1559 < _size1555; ++_i1559) { - xfer += iprot->readString(this->names[_i1515]); + xfer += iprot->readString(this->names[_i1559]); } xfer += iprot->readListEnd(); } @@ -19637,10 +19637,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1516; - for (_iter1516 = this->names.begin(); _iter1516 != this->names.end(); ++_iter1516) + std::vector ::const_iterator _iter1560; + for (_iter1560 = this->names.begin(); _iter1560 != this->names.end(); ++_iter1560) { - xfer += oprot->writeString((*_iter1516)); + xfer += oprot->writeString((*_iter1560)); } xfer += oprot->writeListEnd(); } @@ -19672,10 +19672,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1517; - for (_iter1517 = (*(this->names)).begin(); _iter1517 != (*(this->names)).end(); ++_iter1517) + std::vector ::const_iterator _iter1561; + for (_iter1561 = (*(this->names)).begin(); _iter1561 != (*(this->names)).end(); ++_iter1561) { - xfer += oprot->writeString((*_iter1517)); + xfer += oprot->writeString((*_iter1561)); } xfer += oprot->writeListEnd(); } @@ -19716,14 +19716,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1518; - ::apache::thrift::protocol::TType _etype1521; - xfer += iprot->readListBegin(_etype1521, _size1518); - this->success.resize(_size1518); - uint32_t _i1522; - for (_i1522 = 0; _i1522 < _size1518; ++_i1522) + uint32_t _size1562; + ::apache::thrift::protocol::TType _etype1565; + xfer += iprot->readListBegin(_etype1565, _size1562); + this->success.resize(_size1562); + uint32_t _i1566; + for (_i1566 = 0; _i1566 < _size1562; ++_i1566) { - xfer += this->success[_i1522].read(iprot); + xfer += this->success[_i1566].read(iprot); } xfer += iprot->readListEnd(); } @@ -19770,10 +19770,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1523; - for (_iter1523 = this->success.begin(); _iter1523 != this->success.end(); ++_iter1523) + std::vector ::const_iterator _iter1567; + for (_iter1567 = this->success.begin(); _iter1567 != this->success.end(); ++_iter1567) { - xfer += (*_iter1523).write(oprot); + xfer += (*_iter1567).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19822,14 +19822,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - 
uint32_t _size1524; - ::apache::thrift::protocol::TType _etype1527; - xfer += iprot->readListBegin(_etype1527, _size1524); - (*(this->success)).resize(_size1524); - uint32_t _i1528; - for (_i1528 = 0; _i1528 < _size1524; ++_i1528) + uint32_t _size1568; + ::apache::thrift::protocol::TType _etype1571; + xfer += iprot->readListBegin(_etype1571, _size1568); + (*(this->success)).resize(_size1568); + uint32_t _i1572; + for (_i1572 = 0; _i1572 < _size1568; ++_i1572) { - xfer += (*(this->success))[_i1528].read(iprot); + xfer += (*(this->success))[_i1572].read(iprot); } xfer += iprot->readListEnd(); } @@ -20151,14 +20151,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1529; - ::apache::thrift::protocol::TType _etype1532; - xfer += iprot->readListBegin(_etype1532, _size1529); - this->new_parts.resize(_size1529); - uint32_t _i1533; - for (_i1533 = 0; _i1533 < _size1529; ++_i1533) + uint32_t _size1573; + ::apache::thrift::protocol::TType _etype1576; + xfer += iprot->readListBegin(_etype1576, _size1573); + this->new_parts.resize(_size1573); + uint32_t _i1577; + for (_i1577 = 0; _i1577 < _size1573; ++_i1577) { - xfer += this->new_parts[_i1533].read(iprot); + xfer += this->new_parts[_i1577].read(iprot); } xfer += iprot->readListEnd(); } @@ -20195,10 +20195,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1534; - for (_iter1534 = this->new_parts.begin(); _iter1534 != this->new_parts.end(); ++_iter1534) + std::vector ::const_iterator _iter1578; + for (_iter1578 = this->new_parts.begin(); _iter1578 != this->new_parts.end(); ++_iter1578) { - xfer += (*_iter1534).write(oprot); + xfer += (*_iter1578).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20230,10 +20230,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1535; - for (_iter1535 = (*(this->new_parts)).begin(); _iter1535 != (*(this->new_parts)).end(); ++_iter1535) + std::vector ::const_iterator _iter1579; + for (_iter1579 = (*(this->new_parts)).begin(); _iter1579 != (*(this->new_parts)).end(); ++_iter1579) { - xfer += (*_iter1535).write(oprot); + xfer += (*_iter1579).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20418,14 +20418,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1536; - ::apache::thrift::protocol::TType _etype1539; - xfer += iprot->readListBegin(_etype1539, _size1536); - this->new_parts.resize(_size1536); - uint32_t _i1540; - for (_i1540 = 0; _i1540 < _size1536; ++_i1540) + uint32_t _size1580; + ::apache::thrift::protocol::TType _etype1583; + xfer += iprot->readListBegin(_etype1583, _size1580); + this->new_parts.resize(_size1580); + uint32_t _i1584; + for (_i1584 = 0; _i1584 < _size1580; ++_i1584) { - xfer += this->new_parts[_i1540].read(iprot); + xfer += this->new_parts[_i1584].read(iprot); } xfer += iprot->readListEnd(); } @@ -20470,10 
+20470,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1541; - for (_iter1541 = this->new_parts.begin(); _iter1541 != this->new_parts.end(); ++_iter1541) + std::vector ::const_iterator _iter1585; + for (_iter1585 = this->new_parts.begin(); _iter1585 != this->new_parts.end(); ++_iter1585) { - xfer += (*_iter1541).write(oprot); + xfer += (*_iter1585).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20509,10 +20509,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1542; - for (_iter1542 = (*(this->new_parts)).begin(); _iter1542 != (*(this->new_parts)).end(); ++_iter1542) + std::vector ::const_iterator _iter1586; + for (_iter1586 = (*(this->new_parts)).begin(); _iter1586 != (*(this->new_parts)).end(); ++_iter1586) { - xfer += (*_iter1542).write(oprot); + xfer += (*_iter1586).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20956,14 +20956,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1543; - ::apache::thrift::protocol::TType _etype1546; - xfer += iprot->readListBegin(_etype1546, _size1543); - this->part_vals.resize(_size1543); - uint32_t _i1547; - for (_i1547 = 0; _i1547 < _size1543; ++_i1547) + uint32_t _size1587; + ::apache::thrift::protocol::TType _etype1590; + xfer += iprot->readListBegin(_etype1590, _size1587); + this->part_vals.resize(_size1587); + uint32_t _i1591; + for (_i1591 = 0; _i1591 < _size1587; ++_i1591) { - xfer += iprot->readString(this->part_vals[_i1547]); + xfer += iprot->readString(this->part_vals[_i1591]); } xfer += iprot->readListEnd(); } @@ -21008,10 +21008,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1548; - for (_iter1548 = this->part_vals.begin(); _iter1548 != this->part_vals.end(); ++_iter1548) + std::vector ::const_iterator _iter1592; + for (_iter1592 = this->part_vals.begin(); _iter1592 != this->part_vals.end(); ++_iter1592) { - xfer += oprot->writeString((*_iter1548)); + xfer += oprot->writeString((*_iter1592)); } xfer += oprot->writeListEnd(); } @@ -21047,10 +21047,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1549; - for (_iter1549 = (*(this->part_vals)).begin(); _iter1549 != (*(this->part_vals)).end(); ++_iter1549) + std::vector ::const_iterator _iter1593; + for (_iter1593 = (*(this->part_vals)).begin(); _iter1593 != (*(this->part_vals)).end(); ++_iter1593) { - xfer += oprot->writeString((*_iter1549)); + xfer += oprot->writeString((*_iter1593)); } xfer += 
oprot->writeListEnd(); } @@ -21223,14 +21223,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1550; - ::apache::thrift::protocol::TType _etype1553; - xfer += iprot->readListBegin(_etype1553, _size1550); - this->part_vals.resize(_size1550); - uint32_t _i1554; - for (_i1554 = 0; _i1554 < _size1550; ++_i1554) + uint32_t _size1594; + ::apache::thrift::protocol::TType _etype1597; + xfer += iprot->readListBegin(_etype1597, _size1594); + this->part_vals.resize(_size1594); + uint32_t _i1598; + for (_i1598 = 0; _i1598 < _size1594; ++_i1598) { - xfer += iprot->readString(this->part_vals[_i1554]); + xfer += iprot->readString(this->part_vals[_i1598]); } xfer += iprot->readListEnd(); } @@ -21267,10 +21267,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1555; - for (_iter1555 = this->part_vals.begin(); _iter1555 != this->part_vals.end(); ++_iter1555) + std::vector ::const_iterator _iter1599; + for (_iter1599 = this->part_vals.begin(); _iter1599 != this->part_vals.end(); ++_iter1599) { - xfer += oprot->writeString((*_iter1555)); + xfer += oprot->writeString((*_iter1599)); } xfer += oprot->writeListEnd(); } @@ -21298,10 +21298,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1556; - for (_iter1556 = (*(this->part_vals)).begin(); _iter1556 != (*(this->part_vals)).end(); ++_iter1556) + std::vector ::const_iterator _iter1600; + for (_iter1600 = (*(this->part_vals)).begin(); _iter1600 != (*(this->part_vals)).end(); ++_iter1600) { - xfer += oprot->writeString((*_iter1556)); + xfer += oprot->writeString((*_iter1600)); } xfer += oprot->writeListEnd(); } @@ -21776,14 +21776,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1557; - ::apache::thrift::protocol::TType _etype1560; - xfer += iprot->readListBegin(_etype1560, _size1557); - this->success.resize(_size1557); - uint32_t _i1561; - for (_i1561 = 0; _i1561 < _size1557; ++_i1561) + uint32_t _size1601; + ::apache::thrift::protocol::TType _etype1604; + xfer += iprot->readListBegin(_etype1604, _size1601); + this->success.resize(_size1601); + uint32_t _i1605; + for (_i1605 = 0; _i1605 < _size1601; ++_i1605) { - xfer += iprot->readString(this->success[_i1561]); + xfer += iprot->readString(this->success[_i1605]); } xfer += iprot->readListEnd(); } @@ -21822,10 +21822,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1562; - for (_iter1562 = this->success.begin(); _iter1562 != this->success.end(); ++_iter1562) + std::vector ::const_iterator _iter1606; + for (_iter1606 = this->success.begin(); _iter1606 != this->success.end(); 
++_iter1606) { - xfer += oprot->writeString((*_iter1562)); + xfer += oprot->writeString((*_iter1606)); } xfer += oprot->writeListEnd(); } @@ -21870,14 +21870,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1563; - ::apache::thrift::protocol::TType _etype1566; - xfer += iprot->readListBegin(_etype1566, _size1563); - (*(this->success)).resize(_size1563); - uint32_t _i1567; - for (_i1567 = 0; _i1567 < _size1563; ++_i1567) + uint32_t _size1607; + ::apache::thrift::protocol::TType _etype1610; + xfer += iprot->readListBegin(_etype1610, _size1607); + (*(this->success)).resize(_size1607); + uint32_t _i1611; + for (_i1611 = 0; _i1611 < _size1607; ++_i1611) { - xfer += iprot->readString((*(this->success))[_i1567]); + xfer += iprot->readString((*(this->success))[_i1611]); } xfer += iprot->readListEnd(); } @@ -22015,17 +22015,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1568; - ::apache::thrift::protocol::TType _ktype1569; - ::apache::thrift::protocol::TType _vtype1570; - xfer += iprot->readMapBegin(_ktype1569, _vtype1570, _size1568); - uint32_t _i1572; - for (_i1572 = 0; _i1572 < _size1568; ++_i1572) + uint32_t _size1612; + ::apache::thrift::protocol::TType _ktype1613; + ::apache::thrift::protocol::TType _vtype1614; + xfer += iprot->readMapBegin(_ktype1613, _vtype1614, _size1612); + uint32_t _i1616; + for (_i1616 = 0; _i1616 < _size1612; ++_i1616) { - std::string _key1573; - xfer += iprot->readString(_key1573); - std::string& _val1574 = this->success[_key1573]; - xfer += iprot->readString(_val1574); + std::string _key1617; + xfer += iprot->readString(_key1617); + std::string& _val1618 = this->success[_key1617]; + xfer += iprot->readString(_val1618); } xfer += iprot->readMapEnd(); } @@ -22064,11 +22064,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1575; - for (_iter1575 = this->success.begin(); _iter1575 != this->success.end(); ++_iter1575) + std::map ::const_iterator _iter1619; + for (_iter1619 = this->success.begin(); _iter1619 != this->success.end(); ++_iter1619) { - xfer += oprot->writeString(_iter1575->first); - xfer += oprot->writeString(_iter1575->second); + xfer += oprot->writeString(_iter1619->first); + xfer += oprot->writeString(_iter1619->second); } xfer += oprot->writeMapEnd(); } @@ -22113,17 +22113,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1576; - ::apache::thrift::protocol::TType _ktype1577; - ::apache::thrift::protocol::TType _vtype1578; - xfer += iprot->readMapBegin(_ktype1577, _vtype1578, _size1576); - uint32_t _i1580; - for (_i1580 = 0; _i1580 < _size1576; ++_i1580) + uint32_t _size1620; + ::apache::thrift::protocol::TType _ktype1621; + ::apache::thrift::protocol::TType _vtype1622; + xfer += iprot->readMapBegin(_ktype1621, _vtype1622, _size1620); + uint32_t _i1624; + for (_i1624 = 0; _i1624 < _size1620; ++_i1624) { - std::string _key1581; - xfer += iprot->readString(_key1581); - 
std::string& _val1582 = (*(this->success))[_key1581]; - xfer += iprot->readString(_val1582); + std::string _key1625; + xfer += iprot->readString(_key1625); + std::string& _val1626 = (*(this->success))[_key1625]; + xfer += iprot->readString(_val1626); } xfer += iprot->readMapEnd(); } @@ -22198,17 +22198,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1583; - ::apache::thrift::protocol::TType _ktype1584; - ::apache::thrift::protocol::TType _vtype1585; - xfer += iprot->readMapBegin(_ktype1584, _vtype1585, _size1583); - uint32_t _i1587; - for (_i1587 = 0; _i1587 < _size1583; ++_i1587) + uint32_t _size1627; + ::apache::thrift::protocol::TType _ktype1628; + ::apache::thrift::protocol::TType _vtype1629; + xfer += iprot->readMapBegin(_ktype1628, _vtype1629, _size1627); + uint32_t _i1631; + for (_i1631 = 0; _i1631 < _size1627; ++_i1631) { - std::string _key1588; - xfer += iprot->readString(_key1588); - std::string& _val1589 = this->part_vals[_key1588]; - xfer += iprot->readString(_val1589); + std::string _key1632; + xfer += iprot->readString(_key1632); + std::string& _val1633 = this->part_vals[_key1632]; + xfer += iprot->readString(_val1633); } xfer += iprot->readMapEnd(); } @@ -22219,9 +22219,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1590; - xfer += iprot->readI32(ecast1590); - this->eventType = (PartitionEventType::type)ecast1590; + int32_t ecast1634; + xfer += iprot->readI32(ecast1634); + this->eventType = (PartitionEventType::type)ecast1634; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22255,11 +22255,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1591; - for (_iter1591 = this->part_vals.begin(); _iter1591 != this->part_vals.end(); ++_iter1591) + std::map ::const_iterator _iter1635; + for (_iter1635 = this->part_vals.begin(); _iter1635 != this->part_vals.end(); ++_iter1635) { - xfer += oprot->writeString(_iter1591->first); - xfer += oprot->writeString(_iter1591->second); + xfer += oprot->writeString(_iter1635->first); + xfer += oprot->writeString(_iter1635->second); } xfer += oprot->writeMapEnd(); } @@ -22295,11 +22295,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1592; - for (_iter1592 = (*(this->part_vals)).begin(); _iter1592 != (*(this->part_vals)).end(); ++_iter1592) + std::map ::const_iterator _iter1636; + for (_iter1636 = (*(this->part_vals)).begin(); _iter1636 != (*(this->part_vals)).end(); ++_iter1636) { - xfer += oprot->writeString(_iter1592->first); - xfer += oprot->writeString(_iter1592->second); + xfer += oprot->writeString(_iter1636->first); + xfer += oprot->writeString(_iter1636->second); } xfer += oprot->writeMapEnd(); } @@ -22568,17 +22568,17 @@ uint32_t 
ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1593; - ::apache::thrift::protocol::TType _ktype1594; - ::apache::thrift::protocol::TType _vtype1595; - xfer += iprot->readMapBegin(_ktype1594, _vtype1595, _size1593); - uint32_t _i1597; - for (_i1597 = 0; _i1597 < _size1593; ++_i1597) + uint32_t _size1637; + ::apache::thrift::protocol::TType _ktype1638; + ::apache::thrift::protocol::TType _vtype1639; + xfer += iprot->readMapBegin(_ktype1638, _vtype1639, _size1637); + uint32_t _i1641; + for (_i1641 = 0; _i1641 < _size1637; ++_i1641) { - std::string _key1598; - xfer += iprot->readString(_key1598); - std::string& _val1599 = this->part_vals[_key1598]; - xfer += iprot->readString(_val1599); + std::string _key1642; + xfer += iprot->readString(_key1642); + std::string& _val1643 = this->part_vals[_key1642]; + xfer += iprot->readString(_val1643); } xfer += iprot->readMapEnd(); } @@ -22589,9 +22589,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1600; - xfer += iprot->readI32(ecast1600); - this->eventType = (PartitionEventType::type)ecast1600; + int32_t ecast1644; + xfer += iprot->readI32(ecast1644); + this->eventType = (PartitionEventType::type)ecast1644; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22625,11 +22625,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1601; - for (_iter1601 = this->part_vals.begin(); _iter1601 != this->part_vals.end(); ++_iter1601) + std::map ::const_iterator _iter1645; + for (_iter1645 = this->part_vals.begin(); _iter1645 != this->part_vals.end(); ++_iter1645) { - xfer += oprot->writeString(_iter1601->first); - xfer += oprot->writeString(_iter1601->second); + xfer += oprot->writeString(_iter1645->first); + xfer += oprot->writeString(_iter1645->second); } xfer += oprot->writeMapEnd(); } @@ -22665,11 +22665,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1602; - for (_iter1602 = (*(this->part_vals)).begin(); _iter1602 != (*(this->part_vals)).end(); ++_iter1602) + std::map ::const_iterator _iter1646; + for (_iter1646 = (*(this->part_vals)).begin(); _iter1646 != (*(this->part_vals)).end(); ++_iter1646) { - xfer += oprot->writeString(_iter1602->first); - xfer += oprot->writeString(_iter1602->second); + xfer += oprot->writeString(_iter1646->first); + xfer += oprot->writeString(_iter1646->second); } xfer += oprot->writeMapEnd(); } @@ -24105,14 +24105,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1603; - ::apache::thrift::protocol::TType _etype1606; - xfer += iprot->readListBegin(_etype1606, _size1603); - this->success.resize(_size1603); - uint32_t _i1607; - for (_i1607 = 0; _i1607 < _size1603; 
++_i1607) + uint32_t _size1647; + ::apache::thrift::protocol::TType _etype1650; + xfer += iprot->readListBegin(_etype1650, _size1647); + this->success.resize(_size1647); + uint32_t _i1651; + for (_i1651 = 0; _i1651 < _size1647; ++_i1651) { - xfer += this->success[_i1607].read(iprot); + xfer += this->success[_i1651].read(iprot); } xfer += iprot->readListEnd(); } @@ -24159,10 +24159,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1608; - for (_iter1608 = this->success.begin(); _iter1608 != this->success.end(); ++_iter1608) + std::vector ::const_iterator _iter1652; + for (_iter1652 = this->success.begin(); _iter1652 != this->success.end(); ++_iter1652) { - xfer += (*_iter1608).write(oprot); + xfer += (*_iter1652).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24211,14 +24211,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1609; - ::apache::thrift::protocol::TType _etype1612; - xfer += iprot->readListBegin(_etype1612, _size1609); - (*(this->success)).resize(_size1609); - uint32_t _i1613; - for (_i1613 = 0; _i1613 < _size1609; ++_i1613) + uint32_t _size1653; + ::apache::thrift::protocol::TType _etype1656; + xfer += iprot->readListBegin(_etype1656, _size1653); + (*(this->success)).resize(_size1653); + uint32_t _i1657; + for (_i1657 = 0; _i1657 < _size1653; ++_i1657) { - xfer += (*(this->success))[_i1613].read(iprot); + xfer += (*(this->success))[_i1657].read(iprot); } xfer += iprot->readListEnd(); } @@ -24396,14 +24396,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1614; - ::apache::thrift::protocol::TType _etype1617; - xfer += iprot->readListBegin(_etype1617, _size1614); - this->success.resize(_size1614); - uint32_t _i1618; - for (_i1618 = 0; _i1618 < _size1614; ++_i1618) + uint32_t _size1658; + ::apache::thrift::protocol::TType _etype1661; + xfer += iprot->readListBegin(_etype1661, _size1658); + this->success.resize(_size1658); + uint32_t _i1662; + for (_i1662 = 0; _i1662 < _size1658; ++_i1662) { - xfer += iprot->readString(this->success[_i1618]); + xfer += iprot->readString(this->success[_i1662]); } xfer += iprot->readListEnd(); } @@ -24442,10 +24442,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1619; - for (_iter1619 = this->success.begin(); _iter1619 != this->success.end(); ++_iter1619) + std::vector ::const_iterator _iter1663; + for (_iter1663 = this->success.begin(); _iter1663 != this->success.end(); ++_iter1663) { - xfer += oprot->writeString((*_iter1619)); + xfer += oprot->writeString((*_iter1663)); } xfer += oprot->writeListEnd(); } @@ -24490,14 +24490,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1620; - ::apache::thrift::protocol::TType _etype1623; - xfer += 
iprot->readListBegin(_etype1623, _size1620); - (*(this->success)).resize(_size1620); - uint32_t _i1624; - for (_i1624 = 0; _i1624 < _size1620; ++_i1624) + uint32_t _size1664; + ::apache::thrift::protocol::TType _etype1667; + xfer += iprot->readListBegin(_etype1667, _size1664); + (*(this->success)).resize(_size1664); + uint32_t _i1668; + for (_i1668 = 0; _i1668 < _size1664; ++_i1668) { - xfer += iprot->readString((*(this->success))[_i1624]); + xfer += iprot->readString((*(this->success))[_i1668]); } xfer += iprot->readListEnd(); } @@ -28978,14 +28978,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1625; - ::apache::thrift::protocol::TType _etype1628; - xfer += iprot->readListBegin(_etype1628, _size1625); - this->success.resize(_size1625); - uint32_t _i1629; - for (_i1629 = 0; _i1629 < _size1625; ++_i1629) + uint32_t _size1669; + ::apache::thrift::protocol::TType _etype1672; + xfer += iprot->readListBegin(_etype1672, _size1669); + this->success.resize(_size1669); + uint32_t _i1673; + for (_i1673 = 0; _i1673 < _size1669; ++_i1673) { - xfer += iprot->readString(this->success[_i1629]); + xfer += iprot->readString(this->success[_i1673]); } xfer += iprot->readListEnd(); } @@ -29024,10 +29024,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1630; - for (_iter1630 = this->success.begin(); _iter1630 != this->success.end(); ++_iter1630) + std::vector ::const_iterator _iter1674; + for (_iter1674 = this->success.begin(); _iter1674 != this->success.end(); ++_iter1674) { - xfer += oprot->writeString((*_iter1630)); + xfer += oprot->writeString((*_iter1674)); } xfer += oprot->writeListEnd(); } @@ -29072,14 +29072,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1631; - ::apache::thrift::protocol::TType _etype1634; - xfer += iprot->readListBegin(_etype1634, _size1631); - (*(this->success)).resize(_size1631); - uint32_t _i1635; - for (_i1635 = 0; _i1635 < _size1631; ++_i1635) + uint32_t _size1675; + ::apache::thrift::protocol::TType _etype1678; + xfer += iprot->readListBegin(_etype1678, _size1675); + (*(this->success)).resize(_size1675); + uint32_t _i1679; + for (_i1679 = 0; _i1679 < _size1675; ++_i1679) { - xfer += iprot->readString((*(this->success))[_i1635]); + xfer += iprot->readString((*(this->success))[_i1679]); } xfer += iprot->readListEnd(); } @@ -30039,14 +30039,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1636; - ::apache::thrift::protocol::TType _etype1639; - xfer += iprot->readListBegin(_etype1639, _size1636); - this->success.resize(_size1636); - uint32_t _i1640; - for (_i1640 = 0; _i1640 < _size1636; ++_i1640) + uint32_t _size1680; + ::apache::thrift::protocol::TType _etype1683; + xfer += iprot->readListBegin(_etype1683, _size1680); + this->success.resize(_size1680); + uint32_t _i1684; + for (_i1684 = 0; _i1684 < _size1680; ++_i1684) { - xfer += iprot->readString(this->success[_i1640]); + xfer += iprot->readString(this->success[_i1684]); } xfer 
+= iprot->readListEnd(); } @@ -30085,10 +30085,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1641; - for (_iter1641 = this->success.begin(); _iter1641 != this->success.end(); ++_iter1641) + std::vector ::const_iterator _iter1685; + for (_iter1685 = this->success.begin(); _iter1685 != this->success.end(); ++_iter1685) { - xfer += oprot->writeString((*_iter1641)); + xfer += oprot->writeString((*_iter1685)); } xfer += oprot->writeListEnd(); } @@ -30133,14 +30133,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1642; - ::apache::thrift::protocol::TType _etype1645; - xfer += iprot->readListBegin(_etype1645, _size1642); - (*(this->success)).resize(_size1642); - uint32_t _i1646; - for (_i1646 = 0; _i1646 < _size1642; ++_i1646) + uint32_t _size1686; + ::apache::thrift::protocol::TType _etype1689; + xfer += iprot->readListBegin(_etype1689, _size1686); + (*(this->success)).resize(_size1686); + uint32_t _i1690; + for (_i1690 = 0; _i1690 < _size1686; ++_i1690) { - xfer += iprot->readString((*(this->success))[_i1646]); + xfer += iprot->readString((*(this->success))[_i1690]); } xfer += iprot->readListEnd(); } @@ -30213,9 +30213,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1647; - xfer += iprot->readI32(ecast1647); - this->principal_type = (PrincipalType::type)ecast1647; + int32_t ecast1691; + xfer += iprot->readI32(ecast1691); + this->principal_type = (PrincipalType::type)ecast1691; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30231,9 +30231,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1648; - xfer += iprot->readI32(ecast1648); - this->grantorType = (PrincipalType::type)ecast1648; + int32_t ecast1692; + xfer += iprot->readI32(ecast1692); + this->grantorType = (PrincipalType::type)ecast1692; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -30504,9 +30504,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1649; - xfer += iprot->readI32(ecast1649); - this->principal_type = (PrincipalType::type)ecast1649; + int32_t ecast1693; + xfer += iprot->readI32(ecast1693); + this->principal_type = (PrincipalType::type)ecast1693; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30737,9 +30737,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1650; - xfer += iprot->readI32(ecast1650); - this->principal_type = (PrincipalType::type)ecast1650; + int32_t ecast1694; + xfer += iprot->readI32(ecast1694); + this->principal_type = (PrincipalType::type)ecast1694; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30828,14 +30828,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1651; - ::apache::thrift::protocol::TType _etype1654; - xfer += iprot->readListBegin(_etype1654, _size1651); - this->success.resize(_size1651); - uint32_t _i1655; - for (_i1655 = 0; _i1655 < _size1651; ++_i1655) + uint32_t _size1695; + ::apache::thrift::protocol::TType _etype1698; + xfer += iprot->readListBegin(_etype1698, _size1695); + this->success.resize(_size1695); + uint32_t _i1699; + for (_i1699 = 0; _i1699 < _size1695; ++_i1699) { - xfer += this->success[_i1655].read(iprot); + xfer += this->success[_i1699].read(iprot); } xfer += iprot->readListEnd(); } @@ -30874,10 +30874,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1656; - for (_iter1656 = this->success.begin(); _iter1656 != this->success.end(); ++_iter1656) + std::vector ::const_iterator _iter1700; + for (_iter1700 = this->success.begin(); _iter1700 != this->success.end(); ++_iter1700) { - xfer += (*_iter1656).write(oprot); + xfer += (*_iter1700).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30922,14 +30922,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1657; - ::apache::thrift::protocol::TType _etype1660; - xfer += iprot->readListBegin(_etype1660, _size1657); - (*(this->success)).resize(_size1657); - uint32_t _i1661; - for (_i1661 = 0; _i1661 < _size1657; ++_i1661) + uint32_t _size1701; + ::apache::thrift::protocol::TType _etype1704; + xfer += iprot->readListBegin(_etype1704, _size1701); + (*(this->success)).resize(_size1701); + uint32_t _i1705; + for (_i1705 = 0; _i1705 < _size1701; ++_i1705) { - xfer += (*(this->success))[_i1661].read(iprot); + xfer += (*(this->success))[_i1705].read(iprot); } xfer += iprot->readListEnd(); } @@ -31625,14 +31625,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1662; - ::apache::thrift::protocol::TType _etype1665; - xfer += iprot->readListBegin(_etype1665, _size1662); - this->group_names.resize(_size1662); - uint32_t _i1666; - for (_i1666 = 0; _i1666 < _size1662; ++_i1666) + uint32_t _size1706; + ::apache::thrift::protocol::TType _etype1709; + xfer += iprot->readListBegin(_etype1709, _size1706); + this->group_names.resize(_size1706); + uint32_t _i1710; + for (_i1710 = 0; _i1710 < _size1706; ++_i1710) { - xfer += iprot->readString(this->group_names[_i1666]); + xfer += iprot->readString(this->group_names[_i1710]); } xfer += iprot->readListEnd(); } @@ -31669,10 +31669,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1667; - for (_iter1667 = this->group_names.begin(); _iter1667 != this->group_names.end(); ++_iter1667) + std::vector ::const_iterator _iter1711; + for (_iter1711 = this->group_names.begin(); _iter1711 != this->group_names.end(); ++_iter1711) { - xfer += oprot->writeString((*_iter1667)); + xfer += 
oprot->writeString((*_iter1711)); } xfer += oprot->writeListEnd(); } @@ -31704,10 +31704,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1668; - for (_iter1668 = (*(this->group_names)).begin(); _iter1668 != (*(this->group_names)).end(); ++_iter1668) + std::vector ::const_iterator _iter1712; + for (_iter1712 = (*(this->group_names)).begin(); _iter1712 != (*(this->group_names)).end(); ++_iter1712) { - xfer += oprot->writeString((*_iter1668)); + xfer += oprot->writeString((*_iter1712)); } xfer += oprot->writeListEnd(); } @@ -31882,9 +31882,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1669; - xfer += iprot->readI32(ecast1669); - this->principal_type = (PrincipalType::type)ecast1669; + int32_t ecast1713; + xfer += iprot->readI32(ecast1713); + this->principal_type = (PrincipalType::type)ecast1713; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31989,14 +31989,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1670; - ::apache::thrift::protocol::TType _etype1673; - xfer += iprot->readListBegin(_etype1673, _size1670); - this->success.resize(_size1670); - uint32_t _i1674; - for (_i1674 = 0; _i1674 < _size1670; ++_i1674) + uint32_t _size1714; + ::apache::thrift::protocol::TType _etype1717; + xfer += iprot->readListBegin(_etype1717, _size1714); + this->success.resize(_size1714); + uint32_t _i1718; + for (_i1718 = 0; _i1718 < _size1714; ++_i1718) { - xfer += this->success[_i1674].read(iprot); + xfer += this->success[_i1718].read(iprot); } xfer += iprot->readListEnd(); } @@ -32035,10 +32035,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1675; - for (_iter1675 = this->success.begin(); _iter1675 != this->success.end(); ++_iter1675) + std::vector ::const_iterator _iter1719; + for (_iter1719 = this->success.begin(); _iter1719 != this->success.end(); ++_iter1719) { - xfer += (*_iter1675).write(oprot); + xfer += (*_iter1719).write(oprot); } xfer += oprot->writeListEnd(); } @@ -32083,14 +32083,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1676; - ::apache::thrift::protocol::TType _etype1679; - xfer += iprot->readListBegin(_etype1679, _size1676); - (*(this->success)).resize(_size1676); - uint32_t _i1680; - for (_i1680 = 0; _i1680 < _size1676; ++_i1680) + uint32_t _size1720; + ::apache::thrift::protocol::TType _etype1723; + xfer += iprot->readListBegin(_etype1723, _size1720); + (*(this->success)).resize(_size1720); + uint32_t _i1724; + for (_i1724 = 0; _i1724 < _size1720; ++_i1724) { - xfer += (*(this->success))[_i1680].read(iprot); + xfer += (*(this->success))[_i1724].read(iprot); } xfer += iprot->readListEnd(); } @@ -32778,14 +32778,14 @@ uint32_t 
ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1681; - ::apache::thrift::protocol::TType _etype1684; - xfer += iprot->readListBegin(_etype1684, _size1681); - this->group_names.resize(_size1681); - uint32_t _i1685; - for (_i1685 = 0; _i1685 < _size1681; ++_i1685) + uint32_t _size1725; + ::apache::thrift::protocol::TType _etype1728; + xfer += iprot->readListBegin(_etype1728, _size1725); + this->group_names.resize(_size1725); + uint32_t _i1729; + for (_i1729 = 0; _i1729 < _size1725; ++_i1729) { - xfer += iprot->readString(this->group_names[_i1685]); + xfer += iprot->readString(this->group_names[_i1729]); } xfer += iprot->readListEnd(); } @@ -32818,10 +32818,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1686; - for (_iter1686 = this->group_names.begin(); _iter1686 != this->group_names.end(); ++_iter1686) + std::vector ::const_iterator _iter1730; + for (_iter1730 = this->group_names.begin(); _iter1730 != this->group_names.end(); ++_iter1730) { - xfer += oprot->writeString((*_iter1686)); + xfer += oprot->writeString((*_iter1730)); } xfer += oprot->writeListEnd(); } @@ -32849,10 +32849,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1687; - for (_iter1687 = (*(this->group_names)).begin(); _iter1687 != (*(this->group_names)).end(); ++_iter1687) + std::vector ::const_iterator _iter1731; + for (_iter1731 = (*(this->group_names)).begin(); _iter1731 != (*(this->group_names)).end(); ++_iter1731) { - xfer += oprot->writeString((*_iter1687)); + xfer += oprot->writeString((*_iter1731)); } xfer += oprot->writeListEnd(); } @@ -32893,14 +32893,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1688; - ::apache::thrift::protocol::TType _etype1691; - xfer += iprot->readListBegin(_etype1691, _size1688); - this->success.resize(_size1688); - uint32_t _i1692; - for (_i1692 = 0; _i1692 < _size1688; ++_i1692) + uint32_t _size1732; + ::apache::thrift::protocol::TType _etype1735; + xfer += iprot->readListBegin(_etype1735, _size1732); + this->success.resize(_size1732); + uint32_t _i1736; + for (_i1736 = 0; _i1736 < _size1732; ++_i1736) { - xfer += iprot->readString(this->success[_i1692]); + xfer += iprot->readString(this->success[_i1736]); } xfer += iprot->readListEnd(); } @@ -32939,10 +32939,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1693; - for (_iter1693 = this->success.begin(); _iter1693 != this->success.end(); ++_iter1693) + std::vector ::const_iterator _iter1737; + for (_iter1737 = this->success.begin(); _iter1737 != this->success.end(); ++_iter1737) { - xfer += 
oprot->writeString((*_iter1693)); + xfer += oprot->writeString((*_iter1737)); } xfer += oprot->writeListEnd(); } @@ -32987,14 +32987,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1694; - ::apache::thrift::protocol::TType _etype1697; - xfer += iprot->readListBegin(_etype1697, _size1694); - (*(this->success)).resize(_size1694); - uint32_t _i1698; - for (_i1698 = 0; _i1698 < _size1694; ++_i1698) + uint32_t _size1738; + ::apache::thrift::protocol::TType _etype1741; + xfer += iprot->readListBegin(_etype1741, _size1738); + (*(this->success)).resize(_size1738); + uint32_t _i1742; + for (_i1742 = 0; _i1742 < _size1738; ++_i1742) { - xfer += iprot->readString((*(this->success))[_i1698]); + xfer += iprot->readString((*(this->success))[_i1742]); } xfer += iprot->readListEnd(); } @@ -34305,14 +34305,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1699; - ::apache::thrift::protocol::TType _etype1702; - xfer += iprot->readListBegin(_etype1702, _size1699); - this->success.resize(_size1699); - uint32_t _i1703; - for (_i1703 = 0; _i1703 < _size1699; ++_i1703) + uint32_t _size1743; + ::apache::thrift::protocol::TType _etype1746; + xfer += iprot->readListBegin(_etype1746, _size1743); + this->success.resize(_size1743); + uint32_t _i1747; + for (_i1747 = 0; _i1747 < _size1743; ++_i1747) { - xfer += iprot->readString(this->success[_i1703]); + xfer += iprot->readString(this->success[_i1747]); } xfer += iprot->readListEnd(); } @@ -34343,10 +34343,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1704; - for (_iter1704 = this->success.begin(); _iter1704 != this->success.end(); ++_iter1704) + std::vector ::const_iterator _iter1748; + for (_iter1748 = this->success.begin(); _iter1748 != this->success.end(); ++_iter1748) { - xfer += oprot->writeString((*_iter1704)); + xfer += oprot->writeString((*_iter1748)); } xfer += oprot->writeListEnd(); } @@ -34387,14 +34387,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1705; - ::apache::thrift::protocol::TType _etype1708; - xfer += iprot->readListBegin(_etype1708, _size1705); - (*(this->success)).resize(_size1705); - uint32_t _i1709; - for (_i1709 = 0; _i1709 < _size1705; ++_i1709) + uint32_t _size1749; + ::apache::thrift::protocol::TType _etype1752; + xfer += iprot->readListBegin(_etype1752, _size1749); + (*(this->success)).resize(_size1749); + uint32_t _i1753; + for (_i1753 = 0; _i1753 < _size1749; ++_i1753) { - xfer += iprot->readString((*(this->success))[_i1709]); + xfer += iprot->readString((*(this->success))[_i1753]); } xfer += iprot->readListEnd(); } @@ -35120,14 +35120,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1710; - ::apache::thrift::protocol::TType _etype1713; - xfer += iprot->readListBegin(_etype1713, _size1710); - this->success.resize(_size1710); - uint32_t _i1714; - for 
(_i1714 = 0; _i1714 < _size1710; ++_i1714) + uint32_t _size1754; + ::apache::thrift::protocol::TType _etype1757; + xfer += iprot->readListBegin(_etype1757, _size1754); + this->success.resize(_size1754); + uint32_t _i1758; + for (_i1758 = 0; _i1758 < _size1754; ++_i1758) { - xfer += iprot->readString(this->success[_i1714]); + xfer += iprot->readString(this->success[_i1758]); } xfer += iprot->readListEnd(); } @@ -35158,10 +35158,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1715; - for (_iter1715 = this->success.begin(); _iter1715 != this->success.end(); ++_iter1715) + std::vector ::const_iterator _iter1759; + for (_iter1759 = this->success.begin(); _iter1759 != this->success.end(); ++_iter1759) { - xfer += oprot->writeString((*_iter1715)); + xfer += oprot->writeString((*_iter1759)); } xfer += oprot->writeListEnd(); } @@ -35202,14 +35202,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1716; - ::apache::thrift::protocol::TType _etype1719; - xfer += iprot->readListBegin(_etype1719, _size1716); - (*(this->success)).resize(_size1716); - uint32_t _i1720; - for (_i1720 = 0; _i1720 < _size1716; ++_i1720) + uint32_t _size1760; + ::apache::thrift::protocol::TType _etype1763; + xfer += iprot->readListBegin(_etype1763, _size1760); + (*(this->success)).resize(_size1760); + uint32_t _i1764; + for (_i1764 = 0; _i1764 < _size1760; ++_i1764) { - xfer += iprot->readString((*(this->success))[_i1720]); + xfer += iprot->readString((*(this->success))[_i1764]); } xfer += iprot->readListEnd(); } @@ -36331,11 +36331,11 @@ uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { +ThriftHiveMetastore_get_open_write_ids_args::~ThriftHiveMetastore_get_open_write_ids_args() throw() { } -uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_write_ids_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36376,10 +36376,10 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc return xfer; } -uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_write_ids_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36391,14 +36391,14 @@ uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProto } -ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { +ThriftHiveMetastore_get_open_write_ids_pargs::~ThriftHiveMetastore_get_open_write_ids_pargs() throw() { } -uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const 
{ +uint32_t ThriftHiveMetastore_get_open_write_ids_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36410,11 +36410,11 @@ uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProt } -ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { +ThriftHiveMetastore_get_open_write_ids_result::~ThriftHiveMetastore_get_open_write_ids_result() throw() { } -uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_write_ids_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36471,11 +36471,11 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_write_ids_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_write_ids_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -36496,11 +36496,11 @@ uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { +ThriftHiveMetastore_get_open_write_ids_presult::~ThriftHiveMetastore_get_open_write_ids_presult() throw() { } -uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_write_ids_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36558,11 +36558,11 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { +ThriftHiveMetastore_add_transactional_table_args::~ThriftHiveMetastore_add_transactional_table_args() throw() { } -uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_transactional_table_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36603,10 +36603,10 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_transactional_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_transactional_table_args"); xfer += oprot->writeFieldBegin("rqst", 
::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36618,14 +36618,14 @@ uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { +ThriftHiveMetastore_add_transactional_table_pargs::~ThriftHiveMetastore_add_transactional_table_pargs() throw() { } -uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_transactional_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_transactional_table_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36637,11 +36637,11 @@ uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { +ThriftHiveMetastore_add_transactional_table_result::~ThriftHiveMetastore_add_transactional_table_result() throw() { } -uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_transactional_table_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36662,14 +36662,6 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -36678,22 +36670,6 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -36706,28 +36682,16 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_transactional_table_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_transactional_table_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", 
::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -36735,11 +36699,11 @@ uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { +ThriftHiveMetastore_add_transactional_table_presult::~ThriftHiveMetastore_add_transactional_table_presult() throw() { } -uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_transactional_table_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36760,14 +36724,6 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -36776,22 +36732,6 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -36805,11 +36745,11 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { +ThriftHiveMetastore_allocate_table_write_id_args::~ThriftHiveMetastore_allocate_table_write_id_args() throw() { } -uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36850,10 +36790,10 @@ uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -36865,14 +36805,14 @@ uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { +ThriftHiveMetastore_allocate_table_write_id_pargs::~ThriftHiveMetastore_allocate_table_write_id_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -36884,11 +36824,11 @@ uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { +ThriftHiveMetastore_allocate_table_write_id_result::~ThriftHiveMetastore_allocate_table_write_id_result() throw() { } -uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36909,6 +36849,14 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -36925,6 +36873,14 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -36937,13 +36893,17 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_allocate_table_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_allocate_table_write_id_result"); - if (this->__isset.o1) { + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -36951,6 +36911,10 @@ uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -36958,11 +36922,11 @@ uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() 
throw() { +ThriftHiveMetastore_allocate_table_write_id_presult::~ThriftHiveMetastore_allocate_table_write_id_presult() throw() { } -uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_allocate_table_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -36983,6 +36947,14 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -36999,6 +36971,14 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -37012,11 +36992,11 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP } -ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { +ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { } -uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37057,10 +37037,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -37072,14 +37052,14 @@ uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { +ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -37091,11 +37071,11 @@ uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { +ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { } -uint32_t 
ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37124,6 +37104,22 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -37136,16 +37132,24 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -37153,11 +37157,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { +ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37186,6 +37190,22 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -37199,11 +37219,11 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { +ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37226,8 +37246,8 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ids.read(iprot); - this->__isset.ids = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -37244,13 +37264,13 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->ids.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37259,17 +37279,17 @@ uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { +ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->ids)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37278,11 +37298,11 @@ uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { +ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37303,6 +37323,14 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -37339,13 +37367,17 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t 
ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); - if (this->__isset.o1) { + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -37364,11 +37396,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { +ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37389,6 +37421,14 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -37426,11 +37466,11 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { +ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37453,8 +37493,8 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->txns.read(iprot); - this->__isset.txns = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -37471,13 +37511,13 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->txns.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37486,17 +37526,17 @@ uint32_t 
ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::p } -ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { +ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->txns)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -37505,11 +37545,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift:: } -ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { +ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37530,10 +37570,18 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift:: } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -37550,15 +37598,19 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -37567,11 +37619,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift: } -ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { +ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() { } 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37592,10 +37644,18 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift: } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -37613,11 +37673,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift: } -ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { +ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { } -uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37658,10 +37718,10 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro return xfer; } -uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -37673,14 +37733,14 @@ uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { +ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { } -uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -37692,88 +37752,11 @@ uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { -} - - -uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += 
iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - -ThriftHiveMetastore_compact2_args::~ThriftHiveMetastore_compact2_args() throw() { +ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { } -uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -37794,10 +37777,10 @@ uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TPr } switch (fid) { - case 1: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -37814,45 +37797,723 @@ uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_compact2_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_args"); - - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_compact2_pargs::~ThriftHiveMetastore_compact2_pargs() throw() { -} - +uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { -uint32_t ThriftHiveMetastore_compact2_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer 
+= this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_compact2_result::~ThriftHiveMetastore_compact2_result() throw() { +ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { } -uint32_t ThriftHiveMetastore_compact2_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ids.read(iprot); + this->__isset.ids = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); + + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ids.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); + + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->ids)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { +} + + +uint32_t 
ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { +} + + +uint32_t 
ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->txns.read(iprot); + this->__isset.txns = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); + + xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->txns.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); + + xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->txns)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return 
xfer; +} + + +ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { +} + + +uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { +} + + +uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + 
+ while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_compact2_args::~ThriftHiveMetastore_compact2_args() throw() { +} + + +uint32_t ThriftHiveMetastore_compact2_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_compact2_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_compact2_pargs::~ThriftHiveMetastore_compact2_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_compact2_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact2_pargs"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_compact2_result::~ThriftHiveMetastore_compact2_result() throw() { +} + + +uint32_t 
ThriftHiveMetastore_compact2_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -38379,14 +39040,14 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->db_names.clear(); - uint32_t _size1721; - ::apache::thrift::protocol::TType _etype1724; - xfer += iprot->readListBegin(_etype1724, _size1721); - this->db_names.resize(_size1721); - uint32_t _i1725; - for (_i1725 = 0; _i1725 < _size1721; ++_i1725) + uint32_t _size1765; + ::apache::thrift::protocol::TType _etype1768; + xfer += iprot->readListBegin(_etype1768, _size1765); + this->db_names.resize(_size1765); + uint32_t _i1769; + for (_i1769 = 0; _i1769 < _size1765; ++_i1769) { - xfer += iprot->readString(this->db_names[_i1725]); + xfer += iprot->readString(this->db_names[_i1769]); } xfer += iprot->readListEnd(); } @@ -38399,14 +39060,14 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->table_names.clear(); - uint32_t _size1726; - ::apache::thrift::protocol::TType _etype1729; - xfer += iprot->readListBegin(_etype1729, _size1726); - this->table_names.resize(_size1726); - uint32_t _i1730; - for (_i1730 = 0; _i1730 < _size1726; ++_i1730) + uint32_t _size1770; + ::apache::thrift::protocol::TType _etype1773; + xfer += iprot->readListBegin(_etype1773, _size1770); + this->table_names.resize(_size1770); + uint32_t _i1774; + for (_i1774 = 0; _i1774 < _size1770; ++_i1774) { - xfer += iprot->readString(this->table_names[_i1730]); + xfer += iprot->readString(this->table_names[_i1774]); } xfer += iprot->readListEnd(); } @@ -38443,10 +39104,10 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_args::wri xfer += oprot->writeFieldBegin("db_names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->db_names.size())); - std::vector ::const_iterator _iter1731; - for (_iter1731 = this->db_names.begin(); _iter1731 != this->db_names.end(); ++_iter1731) + std::vector ::const_iterator _iter1775; + for (_iter1775 = this->db_names.begin(); _iter1775 != this->db_names.end(); ++_iter1775) { - xfer += oprot->writeString((*_iter1731)); + xfer += oprot->writeString((*_iter1775)); } xfer += oprot->writeListEnd(); } @@ -38455,10 +39116,10 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_args::wri xfer += oprot->writeFieldBegin("table_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->table_names.size())); - std::vector ::const_iterator _iter1732; - for (_iter1732 = this->table_names.begin(); _iter1732 != this->table_names.end(); ++_iter1732) + std::vector ::const_iterator _iter1776; + for (_iter1776 = this->table_names.begin(); _iter1776 != this->table_names.end(); ++_iter1776) { - xfer += oprot->writeString((*_iter1732)); + xfer += oprot->writeString((*_iter1776)); } xfer += oprot->writeListEnd(); } @@ -38486,10 +39147,10 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_pargs::wr xfer += oprot->writeFieldBegin("db_names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->db_names)).size())); - std::vector ::const_iterator _iter1733; - for (_iter1733 = (*(this->db_names)).begin(); _iter1733 
!= (*(this->db_names)).end(); ++_iter1733) + std::vector ::const_iterator _iter1777; + for (_iter1777 = (*(this->db_names)).begin(); _iter1777 != (*(this->db_names)).end(); ++_iter1777) { - xfer += oprot->writeString((*_iter1733)); + xfer += oprot->writeString((*_iter1777)); } xfer += oprot->writeListEnd(); } @@ -38498,10 +39159,10 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_pargs::wr xfer += oprot->writeFieldBegin("table_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->table_names)).size())); - std::vector ::const_iterator _iter1734; - for (_iter1734 = (*(this->table_names)).begin(); _iter1734 != (*(this->table_names)).end(); ++_iter1734) + std::vector ::const_iterator _iter1778; + for (_iter1778 = (*(this->table_names)).begin(); _iter1778 != (*(this->table_names)).end(); ++_iter1778) { - xfer += oprot->writeString((*_iter1734)); + xfer += oprot->writeString((*_iter1778)); } xfer += oprot->writeListEnd(); } @@ -38546,14 +39207,14 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_result::r if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1735; - ::apache::thrift::protocol::TType _etype1738; - xfer += iprot->readListBegin(_etype1738, _size1735); - this->success.resize(_size1735); - uint32_t _i1739; - for (_i1739 = 0; _i1739 < _size1735; ++_i1739) + uint32_t _size1779; + ::apache::thrift::protocol::TType _etype1782; + xfer += iprot->readListBegin(_etype1782, _size1779); + this->success.resize(_size1779); + uint32_t _i1783; + for (_i1783 = 0; _i1783 < _size1779; ++_i1783) { - xfer += this->success[_i1739].read(iprot); + xfer += this->success[_i1783].read(iprot); } xfer += iprot->readListEnd(); } @@ -38584,10 +39245,10 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_result::w xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1740; - for (_iter1740 = this->success.begin(); _iter1740 != this->success.end(); ++_iter1740) + std::vector ::const_iterator _iter1784; + for (_iter1784 = this->success.begin(); _iter1784 != this->success.end(); ++_iter1784) { - xfer += (*_iter1740).write(oprot); + xfer += (*_iter1784).write(oprot); } xfer += oprot->writeListEnd(); } @@ -38628,14 +39289,14 @@ uint32_t ThriftHiveMetastore_get_last_completed_transaction_for_tables_presult:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1741; - ::apache::thrift::protocol::TType _etype1744; - xfer += iprot->readListBegin(_etype1744, _size1741); - (*(this->success)).resize(_size1741); - uint32_t _i1745; - for (_i1745 = 0; _i1745 < _size1741; ++_i1745) + uint32_t _size1785; + ::apache::thrift::protocol::TType _etype1788; + xfer += iprot->readListBegin(_etype1788, _size1785); + (*(this->success)).resize(_size1785); + uint32_t _i1789; + for (_i1789 = 0; _i1789 < _size1785; ++_i1789) { - xfer += (*(this->success))[_i1745].read(iprot); + xfer += (*(this->success))[_i1789].read(iprot); } xfer += iprot->readListEnd(); } @@ -54302,6 +54963,193 @@ void ThriftHiveMetastoreClient::recv_commit_txn() return; } +void ThriftHiveMetastoreClient::get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) +{ + send_get_open_write_ids(rqst); + recv_get_open_write_ids(_return); +} + 
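The three client methods added above (get_open_write_ids, add_transactional_table, allocate_table_write_id) follow the same send_/recv_ pattern as the existing txn RPCs. Purely as an illustrative sketch and not part of this patch: the snippet below shows how a caller might drive the generated ThriftHiveMetastoreClient against these new endpoints. The transport/protocol setup, the localhost:9083 endpoint, the Apache::Hadoop::Hive namespace, and the empty request structs (their fields are defined elsewhere in the generated types and are left unpopulated here) are all assumptions made for the example; older Thrift runtimes expect boost::shared_ptr where std::shared_ptr is used below.

// Illustrative sketch only -- not part of the generated diff above.
// Assumes the generated header from this patch plus a Thrift runtime whose
// generated client takes std::shared_ptr (use boost::shared_ptr on older Thrift).
#include <iostream>
#include <memory>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/transport/TSocket.h>
#include "ThriftHiveMetastore.h"

using apache::thrift::protocol::TBinaryProtocol;
using apache::thrift::protocol::TProtocol;
using apache::thrift::transport::TBufferedTransport;
using apache::thrift::transport::TSocket;
using apache::thrift::transport::TTransport;
using namespace Apache::Hadoop::Hive;   // assumed C++ namespace of the generated types

int main() {
  // Hypothetical metastore endpoint; host and port are placeholders.
  std::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  std::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  std::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  try {
    transport->open();

    // Request fields (txn/db/table identifiers) are defined by the generated
    // structs and are intentionally left unpopulated in this sketch.
    AllocateTableWriteIdRequest allocReq;
    AllocateTableWriteIdResponse allocResp;
    client.allocate_table_write_id(allocResp, allocReq);   // may throw NoSuchTxnException,
                                                           // TxnAbortedException or MetaException

    GetOpenWriteIdsRequest openReq;
    GetOpenWriteIdsResponse openResp;
    client.get_open_write_ids(openResp, openReq);          // may throw NoSuchTxnException or MetaException

    AddTransactionalTableRequest addReq;
    client.add_transactional_table(addReq);                // may throw MetaException

    transport->close();
  } catch (const ::apache::thrift::TException& e) {
    // All generated metastore exceptions derive from TException.
    std::cerr << "metastore call failed: " << e.what() << std::endl;
    return 1;
  }
  return 0;
}

The ThriftHiveMetastoreConcurrentClient variants added further down in this diff expose the same signatures but route each call through TConcurrentSendSentry/TConcurrentRecvSentry sequencing, so calling code like the sketch above does not change when the concurrent client is used.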
+void ThriftHiveMetastoreClient::send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_open_write_ids_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_open_write_ids(GetOpenWriteIdsResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_open_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_open_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_write_ids failed: unknown result"); +} + +void ThriftHiveMetastoreClient::add_transactional_table(const AddTransactionalTableRequest& rqst) +{ + send_add_transactional_table(rqst); + recv_add_transactional_table(); +} + +void ThriftHiveMetastoreClient::send_add_transactional_table(const AddTransactionalTableRequest& rqst) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("add_transactional_table", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_add_transactional_table_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_add_transactional_table() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("add_transactional_table") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_add_transactional_table_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + return; +} + +void ThriftHiveMetastoreClient::allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) +{ + send_allocate_table_write_id(rqst); + 
recv_allocate_table_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_allocate_table_write_id_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("allocate_table_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_allocate_table_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "allocate_table_write_id failed: unknown result"); +} + void ThriftHiveMetastoreClient::lock(LockResponse& _return, const LockRequest& rqst) { send_lock(rqst); @@ -65327,6 +66175,185 @@ void ThriftHiveMetastoreProcessor::process_commit_txn(int32_t seqid, ::apache::t } } +void ThriftHiveMetastoreProcessor::process_get_open_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_open_write_ids", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_open_write_ids"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_open_write_ids"); + } + + ThriftHiveMetastore_get_open_write_ids_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_open_write_ids", bytes); + } + + ThriftHiveMetastore_get_open_write_ids_result result; + try { + iface_->get_open_write_ids(result.success, args.rqst); + result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_open_write_ids"); + } + + ::apache::thrift::TApplicationException x(e.what()); + 
oprot->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_open_write_ids"); + } + + oprot->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_open_write_ids", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_add_transactional_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_transactional_table", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_transactional_table"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_transactional_table"); + } + + ThriftHiveMetastore_add_transactional_table_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_transactional_table", bytes); + } + + ThriftHiveMetastore_add_transactional_table_result result; + try { + iface_->add_transactional_table(args.rqst); + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_transactional_table"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("add_transactional_table", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_transactional_table"); + } + + oprot->writeMessageBegin("add_transactional_table", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_transactional_table", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_allocate_table_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.allocate_table_write_id", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.allocate_table_write_id"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + ThriftHiveMetastore_allocate_table_write_id_args args; + args.read(iprot); + 
iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.allocate_table_write_id", bytes); + } + + ThriftHiveMetastore_allocate_table_write_id_result result; + try { + iface_->allocate_table_write_id(result.success, args.rqst); + result.__isset.success = true; + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.allocate_table_write_id"); + } + + oprot->writeMessageBegin("allocate_table_write_id", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.allocate_table_write_id", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -79875,7 +80902,266 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_token_identifiers") != 0) { + if (fname.compare("get_all_token_identifiers") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_all_token_identifiers_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) +{ + int32_t seqid = send_add_master_key(key); + return recv_add_master_key(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + + 
ThriftHiveMetastore_add_master_key_pargs args; + args.key = &key; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("add_master_key") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + int32_t _return; + ThriftHiveMetastore_add_master_key_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + sentry.commit(); + return _return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) +{ + int32_t seqid = send_update_master_key(seq_number, key); + recv_update_master_key(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_update_master_key_pargs args; + args.seq_number = &seq_number; + args.key = &key; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + 
iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("update_master_key") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_update_master_key_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) +{ + int32_t seqid = send_remove_master_key(key_seq); + return recv_remove_master_key(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_remove_master_key_pargs args; + args.key_seq = &key_seq; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("remove_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79884,19 +81170,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_token_identifiers_presult result; + bool _return; + ThriftHiveMetastore_remove_master_key_presult result; 
result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79906,20 +81192,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) +void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) { - int32_t seqid = send_add_master_key(key); - return recv_add_master_key(seqid); + int32_t seqid = send_get_master_keys(); + recv_get_master_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_master_key_pargs args; - args.key = &key; + ThriftHiveMetastore_get_master_keys_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -79930,7 +81215,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::stri return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -79959,7 +81244,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_master_key") != 0) { + if (fname.compare("get_master_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -79968,23 +81253,19 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_master_key_presult result; + ThriftHiveMetastore_get_master_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -79994,21 +81275,19 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) +void ThriftHiveMetastoreConcurrentClient::get_open_txns(GetOpenTxnsResponse& _return) { - int32_t seqid = send_update_master_key(seq_number, key); - recv_update_master_key(seqid); + int32_t seqid = send_get_open_txns(); + recv_get_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_master_key_pargs args; - args.seq_number = &seq_number; - args.key = &key; + ThriftHiveMetastore_get_open_txns_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -80019,7 +81298,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -80048,7 +81327,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_master_key") != 0) { + if (fname.compare("get_open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80057,21 +81336,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_update_master_key_presult result; + ThriftHiveMetastore_get_open_txns_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o2; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80081,20 +81358,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) { - int32_t seqid = send_remove_master_key(key_seq); - return recv_remove_master_key(seqid); + int32_t seqid = send_get_open_txns_info(); + recv_get_open_txns_info(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_master_key_pargs args; - args.key_seq = &key_seq; + ThriftHiveMetastore_get_open_txns_info_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -80105,7 +81381,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_ return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -80134,7 +81410,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_master_key") != 0) { + if (fname.compare("get_open_txns_info") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80143,19 +81419,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_master_key_presult result; + ThriftHiveMetastore_get_open_txns_info_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80165,19 +81441,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) { - int32_t seqid = send_get_master_keys(); - recv_get_master_keys(_return, seqid); + int32_t seqid = send_open_txns(rqst); + recv_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() +int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_master_keys_pargs args; + ThriftHiveMetastore_open_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -80188,7 +81465,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const 
int32_t seqid) { int32_t rseqid = 0; @@ -80217,7 +81494,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_master_keys") != 0) { + if (fname.compare("open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80226,7 +81503,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); @@ -80238,7 +81515,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -80248,19 +81525,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_pargs args; + ThriftHiveMetastore_abort_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -80271,7 +81549,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -80300,7 +81578,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns") != 0) { + if (fname.compare("abort_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80309,19 +81587,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80331,19 +81607,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) +void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) { - int32_t seqid = send_get_open_txns_info(); - recv_get_open_txns_info(_return, seqid); + int32_t seqid = send_abort_txns(rqst); + recv_abort_txns(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns_info", 
::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_info_pargs args; + ThriftHiveMetastore_abort_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -80354,7 +81631,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) { int32_t rseqid = 0; @@ -80383,7 +81660,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns_info") != 0) { + if (fname.compare("abort_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80392,19 +81669,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_info_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txns_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80414,19 +81689,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) { - int32_t seqid = send_open_txns(rqst); - recv_open_txns(_return, seqid); + int32_t seqid = send_commit_txn(rqst); + recv_commit_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_open_txns_pargs args; + ThriftHiveMetastore_commit_txn_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -80438,7 +81713,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -80467,7 +81742,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("open_txns") != 0) { + if (fname.compare("commit_txn") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80476,19 +81751,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_open_txns_presult result; - result.success = &_return; + ThriftHiveMetastore_commit_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80498,19 +81775,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) { - int32_t seqid = send_abort_txn(rqst); - recv_abort_txn(seqid); + int32_t seqid = send_get_open_write_ids(rqst); + recv_get_open_write_ids(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txn_pargs args; + ThriftHiveMetastore_get_open_write_ids_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -80522,7 +81799,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_write_ids(GetOpenWriteIdsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -80551,7 +81828,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txn") != 0) { + if (fname.compare("get_open_write_ids") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80560,17 +81837,27 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txn_presult result; + ThriftHiveMetastore_get_open_write_ids_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - sentry.commit(); - return; + if (result.__isset.o2) { + 
sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_write_ids failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -80580,19 +81867,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::add_transactional_table(const AddTransactionalTableRequest& rqst) { - int32_t seqid = send_abort_txns(rqst); - recv_abort_txns(seqid); + int32_t seqid = send_add_transactional_table(rqst); + recv_add_transactional_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_transactional_table(const AddTransactionalTableRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_transactional_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txns_pargs args; + ThriftHiveMetastore_add_transactional_table_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -80604,7 +81891,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_transactional_table(const int32_t seqid) { int32_t rseqid = 0; @@ -80633,7 +81920,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txns") != 0) { + if (fname.compare("add_transactional_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80642,7 +81929,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txns_presult result; + ThriftHiveMetastore_add_transactional_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80662,19 +81949,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) { - int32_t seqid = send_commit_txn(rqst); - recv_commit_txn(seqid); + int32_t seqid = send_allocate_table_write_id(rqst); + recv_allocate_table_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("allocate_table_write_id", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_commit_txn_pargs args; + ThriftHiveMetastore_allocate_table_write_id_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -80686,7 +81973,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -80715,7 +82002,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("commit_txn") != 0) { + if (fname.compare("allocate_table_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -80724,11 +82011,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_commit_txn_presult result; + ThriftHiveMetastore_allocate_table_write_id_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -80737,8 +82030,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "allocate_table_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 42bc929..40aeab1 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -164,6 +164,9 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void abort_txn(const AbortTxnRequest& rqst) = 0; virtual void abort_txns(const AbortTxnsRequest& rqst) = 0; virtual void commit_txn(const CommitTxnRequest& rqst) = 0; + virtual void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) = 0; + virtual void add_transactional_table(const AddTransactionalTableRequest& rqst) = 0; + virtual void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) = 0; virtual void lock(LockResponse& _return, const LockRequest& rqst) = 0; virtual void check_lock(LockResponse& _return, const CheckLockRequest& rqst) = 0; virtual void unlock(const UnlockRequest& rqst) = 0; @@ -688,6 +691,15 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void commit_txn(const CommitTxnRequest& /* rqst */) { return; } + void get_open_write_ids(GetOpenWriteIdsResponse& /* _return */, const GetOpenWriteIdsRequest& /* rqst */) { + return; + } + void add_transactional_table(const AddTransactionalTableRequest& /* rqst */) { + return; + } + void 
allocate_table_write_id(AllocateTableWriteIdResponse& /* _return */, const AllocateTableWriteIdRequest& /* rqst */) { + return; + } void lock(LockResponse& /* _return */, const LockRequest& /* rqst */) { return; } @@ -18704,6 +18716,358 @@ class ThriftHiveMetastore_commit_txn_presult { }; +typedef struct _ThriftHiveMetastore_get_open_write_ids_args__isset { + _ThriftHiveMetastore_get_open_write_ids_args__isset() : rqst(false) {} + bool rqst :1; +} _ThriftHiveMetastore_get_open_write_ids_args__isset; + +class ThriftHiveMetastore_get_open_write_ids_args { + public: + + ThriftHiveMetastore_get_open_write_ids_args(const ThriftHiveMetastore_get_open_write_ids_args&); + ThriftHiveMetastore_get_open_write_ids_args& operator=(const ThriftHiveMetastore_get_open_write_ids_args&); + ThriftHiveMetastore_get_open_write_ids_args() { + } + + virtual ~ThriftHiveMetastore_get_open_write_ids_args() throw(); + GetOpenWriteIdsRequest rqst; + + _ThriftHiveMetastore_get_open_write_ids_args__isset __isset; + + void __set_rqst(const GetOpenWriteIdsRequest& val); + + bool operator == (const ThriftHiveMetastore_get_open_write_ids_args & rhs) const + { + if (!(rqst == rhs.rqst)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_open_write_ids_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_open_write_ids_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_open_write_ids_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_open_write_ids_pargs() throw(); + const GetOpenWriteIdsRequest* rqst; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_open_write_ids_result__isset { + _ThriftHiveMetastore_get_open_write_ids_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_open_write_ids_result__isset; + +class ThriftHiveMetastore_get_open_write_ids_result { + public: + + ThriftHiveMetastore_get_open_write_ids_result(const ThriftHiveMetastore_get_open_write_ids_result&); + ThriftHiveMetastore_get_open_write_ids_result& operator=(const ThriftHiveMetastore_get_open_write_ids_result&); + ThriftHiveMetastore_get_open_write_ids_result() { + } + + virtual ~ThriftHiveMetastore_get_open_write_ids_result() throw(); + GetOpenWriteIdsResponse success; + NoSuchTxnException o1; + MetaException o2; + + _ThriftHiveMetastore_get_open_write_ids_result__isset __isset; + + void __set_success(const GetOpenWriteIdsResponse& val); + + void __set_o1(const NoSuchTxnException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_open_write_ids_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_open_write_ids_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_open_write_ids_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_open_write_ids_presult__isset { + _ThriftHiveMetastore_get_open_write_ids_presult__isset() : success(false), o1(false), o2(false) {} + bool 
success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_open_write_ids_presult__isset; + +class ThriftHiveMetastore_get_open_write_ids_presult { + public: + + + virtual ~ThriftHiveMetastore_get_open_write_ids_presult() throw(); + GetOpenWriteIdsResponse* success; + NoSuchTxnException o1; + MetaException o2; + + _ThriftHiveMetastore_get_open_write_ids_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_add_transactional_table_args__isset { + _ThriftHiveMetastore_add_transactional_table_args__isset() : rqst(false) {} + bool rqst :1; +} _ThriftHiveMetastore_add_transactional_table_args__isset; + +class ThriftHiveMetastore_add_transactional_table_args { + public: + + ThriftHiveMetastore_add_transactional_table_args(const ThriftHiveMetastore_add_transactional_table_args&); + ThriftHiveMetastore_add_transactional_table_args& operator=(const ThriftHiveMetastore_add_transactional_table_args&); + ThriftHiveMetastore_add_transactional_table_args() { + } + + virtual ~ThriftHiveMetastore_add_transactional_table_args() throw(); + AddTransactionalTableRequest rqst; + + _ThriftHiveMetastore_add_transactional_table_args__isset __isset; + + void __set_rqst(const AddTransactionalTableRequest& val); + + bool operator == (const ThriftHiveMetastore_add_transactional_table_args & rhs) const + { + if (!(rqst == rhs.rqst)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_add_transactional_table_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_add_transactional_table_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_add_transactional_table_pargs { + public: + + + virtual ~ThriftHiveMetastore_add_transactional_table_pargs() throw(); + const AddTransactionalTableRequest* rqst; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_add_transactional_table_result__isset { + _ThriftHiveMetastore_add_transactional_table_result__isset() : o1(false) {} + bool o1 :1; +} _ThriftHiveMetastore_add_transactional_table_result__isset; + +class ThriftHiveMetastore_add_transactional_table_result { + public: + + ThriftHiveMetastore_add_transactional_table_result(const ThriftHiveMetastore_add_transactional_table_result&); + ThriftHiveMetastore_add_transactional_table_result& operator=(const ThriftHiveMetastore_add_transactional_table_result&); + ThriftHiveMetastore_add_transactional_table_result() { + } + + virtual ~ThriftHiveMetastore_add_transactional_table_result() throw(); + MetaException o1; + + _ThriftHiveMetastore_add_transactional_table_result__isset __isset; + + void __set_o1(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_add_transactional_table_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_add_transactional_table_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_add_transactional_table_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_add_transactional_table_presult__isset { + _ThriftHiveMetastore_add_transactional_table_presult__isset() : o1(false) {} + 
bool o1 :1; +} _ThriftHiveMetastore_add_transactional_table_presult__isset; + +class ThriftHiveMetastore_add_transactional_table_presult { + public: + + + virtual ~ThriftHiveMetastore_add_transactional_table_presult() throw(); + MetaException o1; + + _ThriftHiveMetastore_add_transactional_table_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_args__isset { + _ThriftHiveMetastore_allocate_table_write_id_args__isset() : rqst(false) {} + bool rqst :1; +} _ThriftHiveMetastore_allocate_table_write_id_args__isset; + +class ThriftHiveMetastore_allocate_table_write_id_args { + public: + + ThriftHiveMetastore_allocate_table_write_id_args(const ThriftHiveMetastore_allocate_table_write_id_args&); + ThriftHiveMetastore_allocate_table_write_id_args& operator=(const ThriftHiveMetastore_allocate_table_write_id_args&); + ThriftHiveMetastore_allocate_table_write_id_args() { + } + + virtual ~ThriftHiveMetastore_allocate_table_write_id_args() throw(); + AllocateTableWriteIdRequest rqst; + + _ThriftHiveMetastore_allocate_table_write_id_args__isset __isset; + + void __set_rqst(const AllocateTableWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_allocate_table_write_id_args & rhs) const + { + if (!(rqst == rhs.rqst)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_allocate_table_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_allocate_table_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_allocate_table_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_allocate_table_write_id_pargs() throw(); + const AllocateTableWriteIdRequest* rqst; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_result__isset { + _ThriftHiveMetastore_allocate_table_write_id_result__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_allocate_table_write_id_result__isset; + +class ThriftHiveMetastore_allocate_table_write_id_result { + public: + + ThriftHiveMetastore_allocate_table_write_id_result(const ThriftHiveMetastore_allocate_table_write_id_result&); + ThriftHiveMetastore_allocate_table_write_id_result& operator=(const ThriftHiveMetastore_allocate_table_write_id_result&); + ThriftHiveMetastore_allocate_table_write_id_result() { + } + + virtual ~ThriftHiveMetastore_allocate_table_write_id_result() throw(); + AllocateTableWriteIdResponse success; + NoSuchTxnException o1; + TxnAbortedException o2; + MetaException o3; + + _ThriftHiveMetastore_allocate_table_write_id_result__isset __isset; + + void __set_success(const AllocateTableWriteIdResponse& val); + + void __set_o1(const NoSuchTxnException& val); + + void __set_o2(const TxnAbortedException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_allocate_table_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_allocate_table_write_id_result &rhs) const { + return !(*this == rhs); 
+ } + + bool operator < (const ThriftHiveMetastore_allocate_table_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_allocate_table_write_id_presult__isset { + _ThriftHiveMetastore_allocate_table_write_id_presult__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_allocate_table_write_id_presult__isset; + +class ThriftHiveMetastore_allocate_table_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_allocate_table_write_id_presult() throw(); + AllocateTableWriteIdResponse* success; + NoSuchTxnException o1; + TxnAbortedException o2; + MetaException o3; + + _ThriftHiveMetastore_allocate_table_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_lock_args__isset { _ThriftHiveMetastore_lock_args__isset() : rqst(false) {} bool rqst :1; @@ -23841,6 +24205,15 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void commit_txn(const CommitTxnRequest& rqst); void send_commit_txn(const CommitTxnRequest& rqst); void recv_commit_txn(); + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst); + void send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst); + void recv_get_open_write_ids(GetOpenWriteIdsResponse& _return); + void add_transactional_table(const AddTransactionalTableRequest& rqst); + void send_add_transactional_table(const AddTransactionalTableRequest& rqst); + void recv_add_transactional_table(); + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst); + void send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst); + void recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return); void lock(LockResponse& _return, const LockRequest& rqst); void send_lock(const LockRequest& rqst); void recv_lock(LockResponse& _return); @@ -24116,6 +24489,9 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_abort_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_abort_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_commit_txn(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_open_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_add_transactional_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_allocate_table_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_check_lock(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_unlock(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -24303,6 +24679,9 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["abort_txn"] = &ThriftHiveMetastoreProcessor::process_abort_txn; processMap_["abort_txns"] = &ThriftHiveMetastoreProcessor::process_abort_txns; processMap_["commit_txn"] = &ThriftHiveMetastoreProcessor::process_commit_txn; + processMap_["get_open_write_ids"] = &ThriftHiveMetastoreProcessor::process_get_open_write_ids; + processMap_["add_transactional_table"] = &ThriftHiveMetastoreProcessor::process_add_transactional_table; + processMap_["allocate_table_write_id"] = &ThriftHiveMetastoreProcessor::process_allocate_table_write_id; processMap_["lock"] = &ThriftHiveMetastoreProcessor::process_lock; processMap_["check_lock"] = &ThriftHiveMetastoreProcessor::process_check_lock; processMap_["unlock"] = &ThriftHiveMetastoreProcessor::process_unlock; @@ -25736,6 +26115,35 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->commit_txn(rqst); } + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_open_write_ids(_return, rqst); + } + ifaces_[i]->get_open_write_ids(_return, rqst); + return; + } + + void add_transactional_table(const AddTransactionalTableRequest& rqst) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->add_transactional_table(rqst); + } + ifaces_[i]->add_transactional_table(rqst); + } + + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->allocate_table_write_id(_return, rqst); + } + ifaces_[i]->allocate_table_write_id(_return, rqst); + return; + } + void lock(LockResponse& _return, const LockRequest& rqst) { size_t sz = ifaces_.size(); size_t i = 0; @@ -26583,6 +26991,15 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void commit_txn(const CommitTxnRequest& rqst); int32_t send_commit_txn(const CommitTxnRequest& rqst); void recv_commit_txn(const int32_t seqid); + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst); + int32_t send_get_open_write_ids(const GetOpenWriteIdsRequest& rqst); + void recv_get_open_write_ids(GetOpenWriteIdsResponse& _return, const int32_t seqid); + void add_transactional_table(const AddTransactionalTableRequest& rqst); + int32_t send_add_transactional_table(const AddTransactionalTableRequest& rqst); + void recv_add_transactional_table(const int32_t seqid); + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst); + int32_t send_allocate_table_write_id(const AllocateTableWriteIdRequest& rqst); + void recv_allocate_table_write_id(AllocateTableWriteIdResponse& _return, const int32_t seqid); void lock(LockResponse& _return, const LockRequest& rqst); int32_t send_lock(const LockRequest& rqst); void recv_lock(LockResponse& _return, const int32_t seqid); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 6a2ff6c..b5ad2f3 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp 
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -732,6 +732,21 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("commit_txn\n"); } + void get_open_write_ids(GetOpenWriteIdsResponse& _return, const GetOpenWriteIdsRequest& rqst) { + // Your implementation goes here + printf("get_open_write_ids\n"); + } + + void add_transactional_table(const AddTransactionalTableRequest& rqst) { + // Your implementation goes here + printf("add_transactional_table\n"); + } + + void allocate_table_write_id(AllocateTableWriteIdResponse& _return, const AllocateTableWriteIdRequest& rqst) { + // Your implementation goes here + printf("allocate_table_write_id\n"); + } + void lock(LockResponse& _return, const LockRequest& rqst) { // Your implementation goes here printf("lock\n"); diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 27f8c0f..8189115 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -14332,6 +14332,917 @@ void CommitTxnRequest::printTo(std::ostream& out) const { } +GetOpenWriteIdsRequest::~GetOpenWriteIdsRequest() throw() { +} + + +void GetOpenWriteIdsRequest::__set_currentTxnId(const int64_t val) { + this->currentTxnId = val; +} + +void GetOpenWriteIdsRequest::__set_tableNames(const std::vector & val) { + this->tableNames = val; +} + +uint32_t GetOpenWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_currentTxnId = false; + bool isset_tableNames = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->currentTxnId); + isset_currentTxnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->tableNames.clear(); + uint32_t _size625; + ::apache::thrift::protocol::TType _etype628; + xfer += iprot->readListBegin(_etype628, _size625); + this->tableNames.resize(_size625); + uint32_t _i629; + for (_i629 = 0; _i629 < _size625; ++_i629) + { + xfer += iprot->readString(this->tableNames[_i629]); + } + xfer += iprot->readListEnd(); + } + isset_tableNames = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_currentTxnId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableNames) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetOpenWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetOpenWriteIdsRequest"); + + xfer += oprot->writeFieldBegin("currentTxnId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->currentTxnId); + xfer += oprot->writeFieldEnd(); + + 
xfer += oprot->writeFieldBegin("tableNames", ::apache::thrift::protocol::T_LIST, 2); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tableNames.size())); + std::vector ::const_iterator _iter630; + for (_iter630 = this->tableNames.begin(); _iter630 != this->tableNames.end(); ++_iter630) + { + xfer += oprot->writeString((*_iter630)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetOpenWriteIdsRequest &a, GetOpenWriteIdsRequest &b) { + using ::std::swap; + swap(a.currentTxnId, b.currentTxnId); + swap(a.tableNames, b.tableNames); +} + +GetOpenWriteIdsRequest::GetOpenWriteIdsRequest(const GetOpenWriteIdsRequest& other631) { + currentTxnId = other631.currentTxnId; + tableNames = other631.tableNames; +} +GetOpenWriteIdsRequest& GetOpenWriteIdsRequest::operator=(const GetOpenWriteIdsRequest& other632) { + currentTxnId = other632.currentTxnId; + tableNames = other632.tableNames; + return *this; +} +void GetOpenWriteIdsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetOpenWriteIdsRequest("; + out << "currentTxnId=" << to_string(currentTxnId); + out << ", " << "tableNames=" << to_string(tableNames); + out << ")"; +} + + +OpenWriteIds::~OpenWriteIds() throw() { +} + + +void OpenWriteIds::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void OpenWriteIds::__set_writeIdHighWaterMark(const int64_t val) { + this->writeIdHighWaterMark = val; +} + +void OpenWriteIds::__set_openWriteIds(const std::vector & val) { + this->openWriteIds = val; +} + +void OpenWriteIds::__set_minWriteId(const int64_t val) { + this->minWriteId = val; +__isset.minWriteId = true; +} + +void OpenWriteIds::__set_abortedBits(const std::string& val) { + this->abortedBits = val; +} + +uint32_t OpenWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_tableName = false; + bool isset_writeIdHighWaterMark = false; + bool isset_openWriteIds = false; + bool isset_abortedBits = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeIdHighWaterMark); + isset_writeIdHighWaterMark = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->openWriteIds.clear(); + uint32_t _size633; + ::apache::thrift::protocol::TType _etype636; + xfer += iprot->readListBegin(_etype636, _size633); + this->openWriteIds.resize(_size633); + uint32_t _i637; + for (_i637 = 0; _i637 < _size633; ++_i637) + { + xfer += iprot->readI64(this->openWriteIds[_i637]); + } + xfer += iprot->readListEnd(); + } + isset_openWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += 
iprot->readI64(this->minWriteId); + this->__isset.minWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readBinary(this->abortedBits); + isset_abortedBits = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeIdHighWaterMark) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_openWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_abortedBits) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t OpenWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("OpenWriteIds"); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeIdHighWaterMark", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->writeIdHighWaterMark); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("openWriteIds", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->openWriteIds.size())); + std::vector ::const_iterator _iter638; + for (_iter638 = this->openWriteIds.begin(); _iter638 != this->openWriteIds.end(); ++_iter638) + { + xfer += oprot->writeI64((*_iter638)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + if (this->__isset.minWriteId) { + xfer += oprot->writeFieldBegin("minWriteId", ::apache::thrift::protocol::T_I64, 4); + xfer += oprot->writeI64(this->minWriteId); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldBegin("abortedBits", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeBinary(this->abortedBits); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(OpenWriteIds &a, OpenWriteIds &b) { + using ::std::swap; + swap(a.tableName, b.tableName); + swap(a.writeIdHighWaterMark, b.writeIdHighWaterMark); + swap(a.openWriteIds, b.openWriteIds); + swap(a.minWriteId, b.minWriteId); + swap(a.abortedBits, b.abortedBits); + swap(a.__isset, b.__isset); +} + +OpenWriteIds::OpenWriteIds(const OpenWriteIds& other639) { + tableName = other639.tableName; + writeIdHighWaterMark = other639.writeIdHighWaterMark; + openWriteIds = other639.openWriteIds; + minWriteId = other639.minWriteId; + abortedBits = other639.abortedBits; + __isset = other639.__isset; +} +OpenWriteIds& OpenWriteIds::operator=(const OpenWriteIds& other640) { + tableName = other640.tableName; + writeIdHighWaterMark = other640.writeIdHighWaterMark; + openWriteIds = other640.openWriteIds; + minWriteId = other640.minWriteId; + abortedBits = other640.abortedBits; + __isset = other640.__isset; + return *this; +} +void OpenWriteIds::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "OpenWriteIds("; + out << "tableName=" << to_string(tableName); + out << ", " << "writeIdHighWaterMark=" << to_string(writeIdHighWaterMark); + out << ", " << "openWriteIds=" << 
to_string(openWriteIds); + out << ", " << "minWriteId="; (__isset.minWriteId ? (out << to_string(minWriteId)) : (out << "")); + out << ", " << "abortedBits=" << to_string(abortedBits); + out << ")"; +} + + +GetOpenWriteIdsResponse::~GetOpenWriteIdsResponse() throw() { +} + + +void GetOpenWriteIdsResponse::__set_openWriteIds(const std::vector & val) { + this->openWriteIds = val; +} + +uint32_t GetOpenWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_openWriteIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->openWriteIds.clear(); + uint32_t _size641; + ::apache::thrift::protocol::TType _etype644; + xfer += iprot->readListBegin(_etype644, _size641); + this->openWriteIds.resize(_size641); + uint32_t _i645; + for (_i645 = 0; _i645 < _size641; ++_i645) + { + xfer += this->openWriteIds[_i645].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_openWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_openWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetOpenWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetOpenWriteIdsResponse"); + + xfer += oprot->writeFieldBegin("openWriteIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->openWriteIds.size())); + std::vector ::const_iterator _iter646; + for (_iter646 = this->openWriteIds.begin(); _iter646 != this->openWriteIds.end(); ++_iter646) + { + xfer += (*_iter646).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetOpenWriteIdsResponse &a, GetOpenWriteIdsResponse &b) { + using ::std::swap; + swap(a.openWriteIds, b.openWriteIds); +} + +GetOpenWriteIdsResponse::GetOpenWriteIdsResponse(const GetOpenWriteIdsResponse& other647) { + openWriteIds = other647.openWriteIds; +} +GetOpenWriteIdsResponse& GetOpenWriteIdsResponse::operator=(const GetOpenWriteIdsResponse& other648) { + openWriteIds = other648.openWriteIds; + return *this; +} +void GetOpenWriteIdsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetOpenWriteIdsResponse("; + out << "openWriteIds=" << to_string(openWriteIds); + out << ")"; +} + + +AddTransactionalTableRequest::~AddTransactionalTableRequest() throw() { +} + + +void AddTransactionalTableRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void AddTransactionalTableRequest::__set_tableName(const std::string& val) { + this->tableName = val; +} + +uint32_t AddTransactionalTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker 
tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tableName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AddTransactionalTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AddTransactionalTableRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AddTransactionalTableRequest &a, AddTransactionalTableRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); +} + +AddTransactionalTableRequest::AddTransactionalTableRequest(const AddTransactionalTableRequest& other649) { + dbName = other649.dbName; + tableName = other649.tableName; +} +AddTransactionalTableRequest& AddTransactionalTableRequest::operator=(const AddTransactionalTableRequest& other650) { + dbName = other650.dbName; + tableName = other650.tableName; + return *this; +} +void AddTransactionalTableRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AddTransactionalTableRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ")"; +} + + +AllocateTableWriteIdRequest::~AllocateTableWriteIdRequest() throw() { +} + + +void AllocateTableWriteIdRequest::__set_txnIds(const std::vector & val) { + this->txnIds = val; +} + +void AllocateTableWriteIdRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void AllocateTableWriteIdRequest::__set_tableName(const std::string& val) { + this->tableName = val; +} + +uint32_t AllocateTableWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnIds = false; + bool isset_dbName = false; + bool isset_tableName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype 
== ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->txnIds.clear(); + uint32_t _size651; + ::apache::thrift::protocol::TType _etype654; + xfer += iprot->readListBegin(_etype654, _size651); + this->txnIds.resize(_size651); + uint32_t _i655; + for (_i655 = 0; _i655 < _size651; ++_i655) + { + xfer += iprot->readI64(this->txnIds[_i655]); + } + xfer += iprot->readListEnd(); + } + isset_txnIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AllocateTableWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AllocateTableWriteIdRequest"); + + xfer += oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); + std::vector ::const_iterator _iter656; + for (_iter656 = this->txnIds.begin(); _iter656 != this->txnIds.end(); ++_iter656) + { + xfer += oprot->writeI64((*_iter656)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AllocateTableWriteIdRequest &a, AllocateTableWriteIdRequest &b) { + using ::std::swap; + swap(a.txnIds, b.txnIds); + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); +} + +AllocateTableWriteIdRequest::AllocateTableWriteIdRequest(const AllocateTableWriteIdRequest& other657) { + txnIds = other657.txnIds; + dbName = other657.dbName; + tableName = other657.tableName; +} +AllocateTableWriteIdRequest& AllocateTableWriteIdRequest::operator=(const AllocateTableWriteIdRequest& other658) { + txnIds = other658.txnIds; + dbName = other658.dbName; + tableName = other658.tableName; + return *this; +} +void AllocateTableWriteIdRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AllocateTableWriteIdRequest("; + out << "txnIds=" << to_string(txnIds); + out << ", " << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ")"; +} + + +TxnToWriteId::~TxnToWriteId() throw() { +} + + +void TxnToWriteId::__set_txnId(const int64_t val) { + this->txnId = val; +} + +void TxnToWriteId::__set_writeId(const int64_t val) 
{ + this->writeId = val; +} + +uint32_t TxnToWriteId::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnId = false; + bool isset_writeId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->txnId); + isset_txnId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TxnToWriteId::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TxnToWriteId"); + + xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->txnId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TxnToWriteId &a, TxnToWriteId &b) { + using ::std::swap; + swap(a.txnId, b.txnId); + swap(a.writeId, b.writeId); +} + +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other659) { + txnId = other659.txnId; + writeId = other659.writeId; +} +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other660) { + txnId = other660.txnId; + writeId = other660.writeId; + return *this; +} +void TxnToWriteId::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TxnToWriteId("; + out << "txnId=" << to_string(txnId); + out << ", " << "writeId=" << to_string(writeId); + out << ")"; +} + + +AllocateTableWriteIdResponse::~AllocateTableWriteIdResponse() throw() { +} + + +void AllocateTableWriteIdResponse::__set_txnToWriteIds(const std::vector & val) { + this->txnToWriteIds = val; +} + +uint32_t AllocateTableWriteIdResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_txnToWriteIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->txnToWriteIds.clear(); + uint32_t _size661; + ::apache::thrift::protocol::TType _etype664; + xfer += iprot->readListBegin(_etype664, _size661); + this->txnToWriteIds.resize(_size661); + uint32_t 
_i665; + for (_i665 = 0; _i665 < _size661; ++_i665) + { + xfer += this->txnToWriteIds[_i665].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_txnToWriteIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_txnToWriteIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AllocateTableWriteIdResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AllocateTableWriteIdResponse"); + + xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); + std::vector ::const_iterator _iter666; + for (_iter666 = this->txnToWriteIds.begin(); _iter666 != this->txnToWriteIds.end(); ++_iter666) + { + xfer += (*_iter666).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AllocateTableWriteIdResponse &a, AllocateTableWriteIdResponse &b) { + using ::std::swap; + swap(a.txnToWriteIds, b.txnToWriteIds); +} + +AllocateTableWriteIdResponse::AllocateTableWriteIdResponse(const AllocateTableWriteIdResponse& other667) { + txnToWriteIds = other667.txnToWriteIds; +} +AllocateTableWriteIdResponse& AllocateTableWriteIdResponse::operator=(const AllocateTableWriteIdResponse& other668) { + txnToWriteIds = other668.txnToWriteIds; + return *this; +} +void AllocateTableWriteIdResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AllocateTableWriteIdResponse("; + out << "txnToWriteIds=" << to_string(txnToWriteIds); + out << ")"; +} + + LockComponent::~LockComponent() throw() { } @@ -14399,9 +15310,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast625; - xfer += iprot->readI32(ecast625); - this->type = (LockType::type)ecast625; + int32_t ecast669; + xfer += iprot->readI32(ecast669); + this->type = (LockType::type)ecast669; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -14409,9 +15320,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast626; - xfer += iprot->readI32(ecast626); - this->level = (LockLevel::type)ecast626; + int32_t ecast670; + xfer += iprot->readI32(ecast670); + this->level = (LockLevel::type)ecast670; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -14443,9 +15354,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast627; - xfer += iprot->readI32(ecast627); - this->operationType = (DataOperationType::type)ecast627; + int32_t ecast671; + xfer += iprot->readI32(ecast671); + this->operationType = (DataOperationType::type)ecast671; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -14545,27 +15456,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other628) { - type = other628.type; - level = other628.level; - 
dbname = other628.dbname; - tablename = other628.tablename; - partitionname = other628.partitionname; - operationType = other628.operationType; - isAcid = other628.isAcid; - isDynamicPartitionWrite = other628.isDynamicPartitionWrite; - __isset = other628.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other629) { - type = other629.type; - level = other629.level; - dbname = other629.dbname; - tablename = other629.tablename; - partitionname = other629.partitionname; - operationType = other629.operationType; - isAcid = other629.isAcid; - isDynamicPartitionWrite = other629.isDynamicPartitionWrite; - __isset = other629.__isset; +LockComponent::LockComponent(const LockComponent& other672) { + type = other672.type; + level = other672.level; + dbname = other672.dbname; + tablename = other672.tablename; + partitionname = other672.partitionname; + operationType = other672.operationType; + isAcid = other672.isAcid; + isDynamicPartitionWrite = other672.isDynamicPartitionWrite; + __isset = other672.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other673) { + type = other673.type; + level = other673.level; + dbname = other673.dbname; + tablename = other673.tablename; + partitionname = other673.partitionname; + operationType = other673.operationType; + isAcid = other673.isAcid; + isDynamicPartitionWrite = other673.isDynamicPartitionWrite; + __isset = other673.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -14637,14 +15548,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size630; - ::apache::thrift::protocol::TType _etype633; - xfer += iprot->readListBegin(_etype633, _size630); - this->component.resize(_size630); - uint32_t _i634; - for (_i634 = 0; _i634 < _size630; ++_i634) + uint32_t _size674; + ::apache::thrift::protocol::TType _etype677; + xfer += iprot->readListBegin(_etype677, _size674); + this->component.resize(_size674); + uint32_t _i678; + for (_i678 = 0; _i678 < _size674; ++_i678) { - xfer += this->component[_i634].read(iprot); + xfer += this->component[_i678].read(iprot); } xfer += iprot->readListEnd(); } @@ -14711,10 +15622,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter635; - for (_iter635 = this->component.begin(); _iter635 != this->component.end(); ++_iter635) + std::vector ::const_iterator _iter679; + for (_iter679 = this->component.begin(); _iter679 != this->component.end(); ++_iter679) { - xfer += (*_iter635).write(oprot); + xfer += (*_iter679).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14753,21 +15664,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other636) { - component = other636.component; - txnid = other636.txnid; - user = other636.user; - hostname = other636.hostname; - agentInfo = other636.agentInfo; - __isset = other636.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other637) { - component = other637.component; - txnid = other637.txnid; - user = other637.user; - hostname = other637.hostname; - agentInfo = other637.agentInfo; - __isset = other637.__isset; +LockRequest::LockRequest(const LockRequest& 
other680) { + component = other680.component; + txnid = other680.txnid; + user = other680.user; + hostname = other680.hostname; + agentInfo = other680.agentInfo; + __isset = other680.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other681) { + component = other681.component; + txnid = other681.txnid; + user = other681.user; + hostname = other681.hostname; + agentInfo = other681.agentInfo; + __isset = other681.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -14827,9 +15738,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast638; - xfer += iprot->readI32(ecast638); - this->state = (LockState::type)ecast638; + int32_t ecast682; + xfer += iprot->readI32(ecast682); + this->state = (LockState::type)ecast682; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -14875,13 +15786,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other639) { - lockid = other639.lockid; - state = other639.state; +LockResponse::LockResponse(const LockResponse& other683) { + lockid = other683.lockid; + state = other683.state; } -LockResponse& LockResponse::operator=(const LockResponse& other640) { - lockid = other640.lockid; - state = other640.state; +LockResponse& LockResponse::operator=(const LockResponse& other684) { + lockid = other684.lockid; + state = other684.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -15003,17 +15914,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other641) { - lockid = other641.lockid; - txnid = other641.txnid; - elapsed_ms = other641.elapsed_ms; - __isset = other641.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other685) { + lockid = other685.lockid; + txnid = other685.txnid; + elapsed_ms = other685.elapsed_ms; + __isset = other685.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other642) { - lockid = other642.lockid; - txnid = other642.txnid; - elapsed_ms = other642.elapsed_ms; - __isset = other642.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other686) { + lockid = other686.lockid; + txnid = other686.txnid; + elapsed_ms = other686.elapsed_ms; + __isset = other686.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -15097,11 +16008,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other643) { - lockid = other643.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other687) { + lockid = other687.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other644) { - lockid = other644.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other688) { + lockid = other688.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -15240,19 +16151,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other645) { - dbname = other645.dbname; - tablename = other645.tablename; - partname = other645.partname; - isExtended = other645.isExtended; - __isset = other645.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other689) { + dbname = 
other689.dbname; + tablename = other689.tablename; + partname = other689.partname; + isExtended = other689.isExtended; + __isset = other689.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other646) { - dbname = other646.dbname; - tablename = other646.tablename; - partname = other646.partname; - isExtended = other646.isExtended; - __isset = other646.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other690) { + dbname = other690.dbname; + tablename = other690.tablename; + partname = other690.partname; + isExtended = other690.isExtended; + __isset = other690.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -15405,9 +16316,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast647; - xfer += iprot->readI32(ecast647); - this->state = (LockState::type)ecast647; + int32_t ecast691; + xfer += iprot->readI32(ecast691); + this->state = (LockState::type)ecast691; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -15415,9 +16326,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast648; - xfer += iprot->readI32(ecast648); - this->type = (LockType::type)ecast648; + int32_t ecast692; + xfer += iprot->readI32(ecast692); + this->type = (LockType::type)ecast692; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -15633,43 +16544,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other649) { - lockid = other649.lockid; - dbname = other649.dbname; - tablename = other649.tablename; - partname = other649.partname; - state = other649.state; - type = other649.type; - txnid = other649.txnid; - lastheartbeat = other649.lastheartbeat; - acquiredat = other649.acquiredat; - user = other649.user; - hostname = other649.hostname; - heartbeatCount = other649.heartbeatCount; - agentInfo = other649.agentInfo; - blockedByExtId = other649.blockedByExtId; - blockedByIntId = other649.blockedByIntId; - lockIdInternal = other649.lockIdInternal; - __isset = other649.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other650) { - lockid = other650.lockid; - dbname = other650.dbname; - tablename = other650.tablename; - partname = other650.partname; - state = other650.state; - type = other650.type; - txnid = other650.txnid; - lastheartbeat = other650.lastheartbeat; - acquiredat = other650.acquiredat; - user = other650.user; - hostname = other650.hostname; - heartbeatCount = other650.heartbeatCount; - agentInfo = other650.agentInfo; - blockedByExtId = other650.blockedByExtId; - blockedByIntId = other650.blockedByIntId; - lockIdInternal = other650.lockIdInternal; - __isset = other650.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other693) { + lockid = other693.lockid; + dbname = other693.dbname; + tablename = other693.tablename; + partname = other693.partname; + state = other693.state; + type = other693.type; + txnid = other693.txnid; + lastheartbeat = other693.lastheartbeat; + acquiredat = other693.acquiredat; + user = other693.user; + hostname = other693.hostname; + heartbeatCount = other693.heartbeatCount; + agentInfo = other693.agentInfo; + blockedByExtId 
= other693.blockedByExtId; + blockedByIntId = other693.blockedByIntId; + lockIdInternal = other693.lockIdInternal; + __isset = other693.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other694) { + lockid = other694.lockid; + dbname = other694.dbname; + tablename = other694.tablename; + partname = other694.partname; + state = other694.state; + type = other694.type; + txnid = other694.txnid; + lastheartbeat = other694.lastheartbeat; + acquiredat = other694.acquiredat; + user = other694.user; + hostname = other694.hostname; + heartbeatCount = other694.heartbeatCount; + agentInfo = other694.agentInfo; + blockedByExtId = other694.blockedByExtId; + blockedByIntId = other694.blockedByIntId; + lockIdInternal = other694.lockIdInternal; + __isset = other694.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -15728,14 +16639,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size651; - ::apache::thrift::protocol::TType _etype654; - xfer += iprot->readListBegin(_etype654, _size651); - this->locks.resize(_size651); - uint32_t _i655; - for (_i655 = 0; _i655 < _size651; ++_i655) + uint32_t _size695; + ::apache::thrift::protocol::TType _etype698; + xfer += iprot->readListBegin(_etype698, _size695); + this->locks.resize(_size695); + uint32_t _i699; + for (_i699 = 0; _i699 < _size695; ++_i699) { - xfer += this->locks[_i655].read(iprot); + xfer += this->locks[_i699].read(iprot); } xfer += iprot->readListEnd(); } @@ -15764,10 +16675,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter656; - for (_iter656 = this->locks.begin(); _iter656 != this->locks.end(); ++_iter656) + std::vector ::const_iterator _iter700; + for (_iter700 = this->locks.begin(); _iter700 != this->locks.end(); ++_iter700) { - xfer += (*_iter656).write(oprot); + xfer += (*_iter700).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15784,13 +16695,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other657) { - locks = other657.locks; - __isset = other657.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other701) { + locks = other701.locks; + __isset = other701.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other658) { - locks = other658.locks; - __isset = other658.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other702) { + locks = other702.locks; + __isset = other702.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -15891,15 +16802,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other659) { - lockid = other659.lockid; - txnid = other659.txnid; - __isset = other659.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other703) { + lockid = other703.lockid; + txnid = other703.txnid; + __isset = other703.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other660) { - lockid = 
other660.lockid; - txnid = other660.txnid; - __isset = other660.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other704) { + lockid = other704.lockid; + txnid = other704.txnid; + __isset = other704.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -16002,13 +16913,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other661) { - min = other661.min; - max = other661.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other705) { + min = other705.min; + max = other705.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other662) { - min = other662.min; - max = other662.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other706) { + min = other706.min; + max = other706.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -16059,15 +16970,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size663; - ::apache::thrift::protocol::TType _etype666; - xfer += iprot->readSetBegin(_etype666, _size663); - uint32_t _i667; - for (_i667 = 0; _i667 < _size663; ++_i667) + uint32_t _size707; + ::apache::thrift::protocol::TType _etype710; + xfer += iprot->readSetBegin(_etype710, _size707); + uint32_t _i711; + for (_i711 = 0; _i711 < _size707; ++_i711) { - int64_t _elem668; - xfer += iprot->readI64(_elem668); - this->aborted.insert(_elem668); + int64_t _elem712; + xfer += iprot->readI64(_elem712); + this->aborted.insert(_elem712); } xfer += iprot->readSetEnd(); } @@ -16080,15 +16991,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size669; - ::apache::thrift::protocol::TType _etype672; - xfer += iprot->readSetBegin(_etype672, _size669); - uint32_t _i673; - for (_i673 = 0; _i673 < _size669; ++_i673) + uint32_t _size713; + ::apache::thrift::protocol::TType _etype716; + xfer += iprot->readSetBegin(_etype716, _size713); + uint32_t _i717; + for (_i717 = 0; _i717 < _size713; ++_i717) { - int64_t _elem674; - xfer += iprot->readI64(_elem674); - this->nosuch.insert(_elem674); + int64_t _elem718; + xfer += iprot->readI64(_elem718); + this->nosuch.insert(_elem718); } xfer += iprot->readSetEnd(); } @@ -16121,10 +17032,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter675; - for (_iter675 = this->aborted.begin(); _iter675 != this->aborted.end(); ++_iter675) + std::set ::const_iterator _iter719; + for (_iter719 = this->aborted.begin(); _iter719 != this->aborted.end(); ++_iter719) { - xfer += oprot->writeI64((*_iter675)); + xfer += oprot->writeI64((*_iter719)); } xfer += oprot->writeSetEnd(); } @@ -16133,10 +17044,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, 
static_cast(this->nosuch.size())); - std::set ::const_iterator _iter676; - for (_iter676 = this->nosuch.begin(); _iter676 != this->nosuch.end(); ++_iter676) + std::set ::const_iterator _iter720; + for (_iter720 = this->nosuch.begin(); _iter720 != this->nosuch.end(); ++_iter720) { - xfer += oprot->writeI64((*_iter676)); + xfer += oprot->writeI64((*_iter720)); } xfer += oprot->writeSetEnd(); } @@ -16153,13 +17064,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other677) { - aborted = other677.aborted; - nosuch = other677.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other721) { + aborted = other721.aborted; + nosuch = other721.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other678) { - aborted = other678.aborted; - nosuch = other678.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other722) { + aborted = other722.aborted; + nosuch = other722.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -16252,9 +17163,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast679; - xfer += iprot->readI32(ecast679); - this->type = (CompactionType::type)ecast679; + int32_t ecast723; + xfer += iprot->readI32(ecast723); + this->type = (CompactionType::type)ecast723; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16272,17 +17183,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size680; - ::apache::thrift::protocol::TType _ktype681; - ::apache::thrift::protocol::TType _vtype682; - xfer += iprot->readMapBegin(_ktype681, _vtype682, _size680); - uint32_t _i684; - for (_i684 = 0; _i684 < _size680; ++_i684) + uint32_t _size724; + ::apache::thrift::protocol::TType _ktype725; + ::apache::thrift::protocol::TType _vtype726; + xfer += iprot->readMapBegin(_ktype725, _vtype726, _size724); + uint32_t _i728; + for (_i728 = 0; _i728 < _size724; ++_i728) { - std::string _key685; - xfer += iprot->readString(_key685); - std::string& _val686 = this->properties[_key685]; - xfer += iprot->readString(_val686); + std::string _key729; + xfer += iprot->readString(_key729); + std::string& _val730 = this->properties[_key729]; + xfer += iprot->readString(_val730); } xfer += iprot->readMapEnd(); } @@ -16340,11 +17251,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter687; - for (_iter687 = this->properties.begin(); _iter687 != this->properties.end(); ++_iter687) + std::map ::const_iterator _iter731; + for (_iter731 = this->properties.begin(); _iter731 != this->properties.end(); ++_iter731) { - xfer += oprot->writeString(_iter687->first); - xfer += oprot->writeString(_iter687->second); + xfer += oprot->writeString(_iter731->first); + xfer += oprot->writeString(_iter731->second); } xfer += oprot->writeMapEnd(); } @@ -16366,23 +17277,23 @@ void 
swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other688) { - dbname = other688.dbname; - tablename = other688.tablename; - partitionname = other688.partitionname; - type = other688.type; - runas = other688.runas; - properties = other688.properties; - __isset = other688.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other689) { - dbname = other689.dbname; - tablename = other689.tablename; - partitionname = other689.partitionname; - type = other689.type; - runas = other689.runas; - properties = other689.properties; - __isset = other689.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other732) { + dbname = other732.dbname; + tablename = other732.tablename; + partitionname = other732.partitionname; + type = other732.type; + runas = other732.runas; + properties = other732.properties; + __isset = other732.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other733) { + dbname = other733.dbname; + tablename = other733.tablename; + partitionname = other733.partitionname; + type = other733.type; + runas = other733.runas; + properties = other733.properties; + __isset = other733.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -16509,15 +17420,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other690) { - id = other690.id; - state = other690.state; - accepted = other690.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other734) { + id = other734.id; + state = other734.state; + accepted = other734.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other691) { - id = other691.id; - state = other691.state; - accepted = other691.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other735) { + id = other735.id; + state = other735.state; + accepted = other735.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -16578,11 +17489,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other692) { - (void) other692; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other736) { + (void) other736; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other693) { - (void) other693; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other737) { + (void) other737; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -16708,9 +17619,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast694; - xfer += iprot->readI32(ecast694); - this->type = (CompactionType::type)ecast694; + int32_t ecast738; + xfer += iprot->readI32(ecast738); + this->type = (CompactionType::type)ecast738; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16897,37 +17808,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other695) { - dbname = other695.dbname; - tablename = other695.tablename; - partitionname = 
other695.partitionname; - type = other695.type; - state = other695.state; - workerid = other695.workerid; - start = other695.start; - runAs = other695.runAs; - hightestTxnId = other695.hightestTxnId; - metaInfo = other695.metaInfo; - endTime = other695.endTime; - hadoopJobId = other695.hadoopJobId; - id = other695.id; - __isset = other695.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other696) { - dbname = other696.dbname; - tablename = other696.tablename; - partitionname = other696.partitionname; - type = other696.type; - state = other696.state; - workerid = other696.workerid; - start = other696.start; - runAs = other696.runAs; - hightestTxnId = other696.hightestTxnId; - metaInfo = other696.metaInfo; - endTime = other696.endTime; - hadoopJobId = other696.hadoopJobId; - id = other696.id; - __isset = other696.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other739) { + dbname = other739.dbname; + tablename = other739.tablename; + partitionname = other739.partitionname; + type = other739.type; + state = other739.state; + workerid = other739.workerid; + start = other739.start; + runAs = other739.runAs; + hightestTxnId = other739.hightestTxnId; + metaInfo = other739.metaInfo; + endTime = other739.endTime; + hadoopJobId = other739.hadoopJobId; + id = other739.id; + __isset = other739.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other740) { + dbname = other740.dbname; + tablename = other740.tablename; + partitionname = other740.partitionname; + type = other740.type; + state = other740.state; + workerid = other740.workerid; + start = other740.start; + runAs = other740.runAs; + hightestTxnId = other740.hightestTxnId; + metaInfo = other740.metaInfo; + endTime = other740.endTime; + hadoopJobId = other740.hadoopJobId; + id = other740.id; + __isset = other740.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -16984,14 +17895,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size697; - ::apache::thrift::protocol::TType _etype700; - xfer += iprot->readListBegin(_etype700, _size697); - this->compacts.resize(_size697); - uint32_t _i701; - for (_i701 = 0; _i701 < _size697; ++_i701) + uint32_t _size741; + ::apache::thrift::protocol::TType _etype744; + xfer += iprot->readListBegin(_etype744, _size741); + this->compacts.resize(_size741); + uint32_t _i745; + for (_i745 = 0; _i745 < _size741; ++_i745) { - xfer += this->compacts[_i701].read(iprot); + xfer += this->compacts[_i745].read(iprot); } xfer += iprot->readListEnd(); } @@ -17022,10 +17933,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter702; - for (_iter702 = this->compacts.begin(); _iter702 != this->compacts.end(); ++_iter702) + std::vector ::const_iterator _iter746; + for (_iter746 = this->compacts.begin(); _iter746 != this->compacts.end(); ++_iter746) { - xfer += (*_iter702).write(oprot); + xfer += (*_iter746).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17041,11 +17952,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) 
{ swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other703) { - compacts = other703.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other747) { + compacts = other747.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other704) { - compacts = other704.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other748) { + compacts = other748.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -17134,14 +18045,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size705; - ::apache::thrift::protocol::TType _etype708; - xfer += iprot->readListBegin(_etype708, _size705); - this->partitionnames.resize(_size705); - uint32_t _i709; - for (_i709 = 0; _i709 < _size705; ++_i709) + uint32_t _size749; + ::apache::thrift::protocol::TType _etype752; + xfer += iprot->readListBegin(_etype752, _size749); + this->partitionnames.resize(_size749); + uint32_t _i753; + for (_i753 = 0; _i753 < _size749; ++_i753) { - xfer += iprot->readString(this->partitionnames[_i709]); + xfer += iprot->readString(this->partitionnames[_i753]); } xfer += iprot->readListEnd(); } @@ -17152,9 +18063,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast710; - xfer += iprot->readI32(ecast710); - this->operationType = (DataOperationType::type)ecast710; + int32_t ecast754; + xfer += iprot->readI32(ecast754); + this->operationType = (DataOperationType::type)ecast754; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -17200,10 +18111,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter711; - for (_iter711 = this->partitionnames.begin(); _iter711 != this->partitionnames.end(); ++_iter711) + std::vector ::const_iterator _iter755; + for (_iter755 = this->partitionnames.begin(); _iter755 != this->partitionnames.end(); ++_iter755) { - xfer += oprot->writeString((*_iter711)); + xfer += oprot->writeString((*_iter755)); } xfer += oprot->writeListEnd(); } @@ -17229,21 +18140,21 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other712) { - txnid = other712.txnid; - dbname = other712.dbname; - tablename = other712.tablename; - partitionnames = other712.partitionnames; - operationType = other712.operationType; - __isset = other712.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other713) { - txnid = other713.txnid; - dbname = other713.dbname; - tablename = other713.tablename; - partitionnames = other713.partitionnames; - operationType = other713.operationType; - __isset = other713.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other756) { + txnid = other756.txnid; + dbname = other756.dbname; + tablename = other756.tablename; + partitionnames = other756.partitionnames; + operationType = other756.operationType; + __isset = 
other756.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other757) { + txnid = other757.txnid; + dbname = other757.dbname; + tablename = other757.tablename; + partitionnames = other757.partitionnames; + operationType = other757.operationType; + __isset = other757.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -17444,25 +18355,25 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.__isset, b.__isset); } -BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other714) { - isnull = other714.isnull; - id = other714.id; - time = other714.time; - txnid = other714.txnid; - dbname = other714.dbname; - tablename = other714.tablename; - partitionname = other714.partitionname; - __isset = other714.__isset; -} -BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other715) { - isnull = other715.isnull; - id = other715.id; - time = other715.time; - txnid = other715.txnid; - dbname = other715.dbname; - tablename = other715.tablename; - partitionname = other715.partitionname; - __isset = other715.__isset; +BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other758) { + isnull = other758.isnull; + id = other758.id; + time = other758.time; + txnid = other758.txnid; + dbname = other758.dbname; + tablename = other758.tablename; + partitionname = other758.partitionname; + __isset = other758.__isset; +} +BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other759) { + isnull = other759.isnull; + id = other759.id; + time = other759.time; + txnid = other759.txnid; + dbname = other759.dbname; + tablename = other759.tablename; + partitionname = other759.partitionname; + __isset = other759.__isset; return *this; } void BasicTxnInfo::printTo(std::ostream& out) const { @@ -17526,14 +18437,14 @@ uint32_t TxnsSnapshot::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size716; - ::apache::thrift::protocol::TType _etype719; - xfer += iprot->readListBegin(_etype719, _size716); - this->open_txns.resize(_size716); - uint32_t _i720; - for (_i720 = 0; _i720 < _size716; ++_i720) + uint32_t _size760; + ::apache::thrift::protocol::TType _etype763; + xfer += iprot->readListBegin(_etype763, _size760); + this->open_txns.resize(_size760); + uint32_t _i764; + for (_i764 = 0; _i764 < _size760; ++_i764) { - xfer += iprot->readI64(this->open_txns[_i720]); + xfer += iprot->readI64(this->open_txns[_i764]); } xfer += iprot->readListEnd(); } @@ -17570,10 +18481,10 @@ uint32_t TxnsSnapshot::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter721; - for (_iter721 = this->open_txns.begin(); _iter721 != this->open_txns.end(); ++_iter721) + std::vector ::const_iterator _iter765; + for (_iter765 = this->open_txns.begin(); _iter765 != this->open_txns.end(); ++_iter765) { - xfer += oprot->writeI64((*_iter721)); + xfer += oprot->writeI64((*_iter765)); } xfer += oprot->writeListEnd(); } @@ -17590,13 +18501,13 @@ void swap(TxnsSnapshot &a, TxnsSnapshot &b) { swap(a.open_txns, b.open_txns); } -TxnsSnapshot::TxnsSnapshot(const TxnsSnapshot& other722) { - txn_high_water_mark = other722.txn_high_water_mark; - open_txns = other722.open_txns; +TxnsSnapshot::TxnsSnapshot(const TxnsSnapshot& other766) { + txn_high_water_mark = 
other766.txn_high_water_mark; + open_txns = other766.open_txns; } -TxnsSnapshot& TxnsSnapshot::operator=(const TxnsSnapshot& other723) { - txn_high_water_mark = other723.txn_high_water_mark; - open_txns = other723.open_txns; +TxnsSnapshot& TxnsSnapshot::operator=(const TxnsSnapshot& other767) { + txn_high_water_mark = other767.txn_high_water_mark; + open_txns = other767.open_txns; return *this; } void TxnsSnapshot::printTo(std::ostream& out) const { @@ -17699,15 +18610,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other724) { - lastEvent = other724.lastEvent; - maxEvents = other724.maxEvents; - __isset = other724.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other768) { + lastEvent = other768.lastEvent; + maxEvents = other768.maxEvents; + __isset = other768.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other725) { - lastEvent = other725.lastEvent; - maxEvents = other725.maxEvents; - __isset = other725.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other769) { + lastEvent = other769.lastEvent; + maxEvents = other769.maxEvents; + __isset = other769.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -17908,25 +18819,25 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other726) { - eventId = other726.eventId; - eventTime = other726.eventTime; - eventType = other726.eventType; - dbName = other726.dbName; - tableName = other726.tableName; - message = other726.message; - messageFormat = other726.messageFormat; - __isset = other726.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other727) { - eventId = other727.eventId; - eventTime = other727.eventTime; - eventType = other727.eventType; - dbName = other727.dbName; - tableName = other727.tableName; - message = other727.message; - messageFormat = other727.messageFormat; - __isset = other727.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other770) { + eventId = other770.eventId; + eventTime = other770.eventTime; + eventType = other770.eventType; + dbName = other770.dbName; + tableName = other770.tableName; + message = other770.message; + messageFormat = other770.messageFormat; + __isset = other770.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other771) { + eventId = other771.eventId; + eventTime = other771.eventTime; + eventType = other771.eventType; + dbName = other771.dbName; + tableName = other771.tableName; + message = other771.message; + messageFormat = other771.messageFormat; + __isset = other771.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -17977,14 +18888,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size728; - ::apache::thrift::protocol::TType _etype731; - xfer += iprot->readListBegin(_etype731, _size728); - this->events.resize(_size728); - uint32_t _i732; - for (_i732 = 0; _i732 < _size728; ++_i732) + uint32_t _size772; + ::apache::thrift::protocol::TType _etype775; + xfer += iprot->readListBegin(_etype775, _size772); + 
this->events.resize(_size772); + uint32_t _i776; + for (_i776 = 0; _i776 < _size772; ++_i776) { - xfer += this->events[_i732].read(iprot); + xfer += this->events[_i776].read(iprot); } xfer += iprot->readListEnd(); } @@ -18015,10 +18926,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter733; - for (_iter733 = this->events.begin(); _iter733 != this->events.end(); ++_iter733) + std::vector ::const_iterator _iter777; + for (_iter777 = this->events.begin(); _iter777 != this->events.end(); ++_iter777) { - xfer += (*_iter733).write(oprot); + xfer += (*_iter777).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18034,11 +18945,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other734) { - events = other734.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other778) { + events = other778.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other735) { - events = other735.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other779) { + events = other779.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -18120,11 +19031,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other736) { - eventId = other736.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other780) { + eventId = other780.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other737) { - eventId = other737.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other781) { + eventId = other781.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -18226,13 +19137,13 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) swap(a.dbName, b.dbName); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other738) { - fromEventId = other738.fromEventId; - dbName = other738.dbName; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other782) { + fromEventId = other782.fromEventId; + dbName = other782.dbName; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other739) { - fromEventId = other739.fromEventId; - dbName = other739.dbName; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other783) { + fromEventId = other783.fromEventId; + dbName = other783.dbName; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -18315,11 +19226,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } 
-NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other740) { - eventsCount = other740.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other784) { + eventsCount = other784.eventsCount; } -NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other741) { - eventsCount = other741.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other785) { + eventsCount = other785.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -18382,14 +19293,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size742; - ::apache::thrift::protocol::TType _etype745; - xfer += iprot->readListBegin(_etype745, _size742); - this->filesAdded.resize(_size742); - uint32_t _i746; - for (_i746 = 0; _i746 < _size742; ++_i746) + uint32_t _size786; + ::apache::thrift::protocol::TType _etype789; + xfer += iprot->readListBegin(_etype789, _size786); + this->filesAdded.resize(_size786); + uint32_t _i790; + for (_i790 = 0; _i790 < _size786; ++_i790) { - xfer += iprot->readString(this->filesAdded[_i746]); + xfer += iprot->readString(this->filesAdded[_i790]); } xfer += iprot->readListEnd(); } @@ -18402,14 +19313,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size747; - ::apache::thrift::protocol::TType _etype750; - xfer += iprot->readListBegin(_etype750, _size747); - this->filesAddedChecksum.resize(_size747); - uint32_t _i751; - for (_i751 = 0; _i751 < _size747; ++_i751) + uint32_t _size791; + ::apache::thrift::protocol::TType _etype794; + xfer += iprot->readListBegin(_etype794, _size791); + this->filesAddedChecksum.resize(_size791); + uint32_t _i795; + for (_i795 = 0; _i795 < _size791; ++_i795) { - xfer += iprot->readString(this->filesAddedChecksum[_i751]); + xfer += iprot->readString(this->filesAddedChecksum[_i795]); } xfer += iprot->readListEnd(); } @@ -18445,10 +19356,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter752; - for (_iter752 = this->filesAdded.begin(); _iter752 != this->filesAdded.end(); ++_iter752) + std::vector ::const_iterator _iter796; + for (_iter796 = this->filesAdded.begin(); _iter796 != this->filesAdded.end(); ++_iter796) { - xfer += oprot->writeString((*_iter752)); + xfer += oprot->writeString((*_iter796)); } xfer += oprot->writeListEnd(); } @@ -18458,10 +19369,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector ::const_iterator _iter753; - for (_iter753 = this->filesAddedChecksum.begin(); _iter753 != this->filesAddedChecksum.end(); ++_iter753) + std::vector ::const_iterator _iter797; + for (_iter797 = 
this->filesAddedChecksum.begin(); _iter797 != this->filesAddedChecksum.end(); ++_iter797) { - xfer += oprot->writeString((*_iter753)); + xfer += oprot->writeString((*_iter797)); } xfer += oprot->writeListEnd(); } @@ -18480,17 +19391,17 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other754) { - replace = other754.replace; - filesAdded = other754.filesAdded; - filesAddedChecksum = other754.filesAddedChecksum; - __isset = other754.__isset; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other798) { + replace = other798.replace; + filesAdded = other798.filesAdded; + filesAddedChecksum = other798.filesAddedChecksum; + __isset = other798.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other755) { - replace = other755.replace; - filesAdded = other755.filesAdded; - filesAddedChecksum = other755.filesAddedChecksum; - __isset = other755.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other799) { + replace = other799.replace; + filesAdded = other799.filesAdded; + filesAddedChecksum = other799.filesAddedChecksum; + __isset = other799.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -18572,13 +19483,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other756) { - insertData = other756.insertData; - __isset = other756.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other800) { + insertData = other800.insertData; + __isset = other800.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other757) { - insertData = other757.insertData; - __isset = other757.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other801) { + insertData = other801.insertData; + __isset = other801.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -18675,14 +19586,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size758; - ::apache::thrift::protocol::TType _etype761; - xfer += iprot->readListBegin(_etype761, _size758); - this->partitionVals.resize(_size758); - uint32_t _i762; - for (_i762 = 0; _i762 < _size758; ++_i762) + uint32_t _size802; + ::apache::thrift::protocol::TType _etype805; + xfer += iprot->readListBegin(_etype805, _size802); + this->partitionVals.resize(_size802); + uint32_t _i806; + for (_i806 = 0; _i806 < _size802; ++_i806) { - xfer += iprot->readString(this->partitionVals[_i762]); + xfer += iprot->readString(this->partitionVals[_i806]); } xfer += iprot->readListEnd(); } @@ -18734,10 +19645,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter763; - for (_iter763 = this->partitionVals.begin(); _iter763 != this->partitionVals.end(); ++_iter763) + std::vector ::const_iterator _iter807; + for (_iter807 = this->partitionVals.begin(); _iter807 != 
this->partitionVals.end(); ++_iter807) { - xfer += oprot->writeString((*_iter763)); + xfer += oprot->writeString((*_iter807)); } xfer += oprot->writeListEnd(); } @@ -18758,21 +19669,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other764) { - successful = other764.successful; - data = other764.data; - dbName = other764.dbName; - tableName = other764.tableName; - partitionVals = other764.partitionVals; - __isset = other764.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other765) { - successful = other765.successful; - data = other765.data; - dbName = other765.dbName; - tableName = other765.tableName; - partitionVals = other765.partitionVals; - __isset = other765.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other808) { + successful = other808.successful; + data = other808.data; + dbName = other808.dbName; + tableName = other808.tableName; + partitionVals = other808.partitionVals; + __isset = other808.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other809) { + successful = other809.successful; + data = other809.data; + dbName = other809.dbName; + tableName = other809.tableName; + partitionVals = other809.partitionVals; + __isset = other809.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -18835,11 +19746,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other766) { - (void) other766; +FireEventResponse::FireEventResponse(const FireEventResponse& other810) { + (void) other810; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other767) { - (void) other767; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other811) { + (void) other811; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -18939,15 +19850,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other768) { - metadata = other768.metadata; - includeBitset = other768.includeBitset; - __isset = other768.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other812) { + metadata = other812.metadata; + includeBitset = other812.includeBitset; + __isset = other812.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other769) { - metadata = other769.metadata; - includeBitset = other769.includeBitset; - __isset = other769.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other813) { + metadata = other813.metadata; + includeBitset = other813.includeBitset; + __isset = other813.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -18998,17 +19909,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size770; - ::apache::thrift::protocol::TType _ktype771; - ::apache::thrift::protocol::TType _vtype772; - xfer += iprot->readMapBegin(_ktype771, _vtype772, _size770); - uint32_t _i774; - for (_i774 = 0; _i774 < _size770; ++_i774) + uint32_t _size814; + ::apache::thrift::protocol::TType _ktype815; + ::apache::thrift::protocol::TType _vtype816; + xfer += iprot->readMapBegin(_ktype815, _vtype816, _size814); + uint32_t _i818; 
+ for (_i818 = 0; _i818 < _size814; ++_i818) { - int64_t _key775; - xfer += iprot->readI64(_key775); - MetadataPpdResult& _val776 = this->metadata[_key775]; - xfer += _val776.read(iprot); + int64_t _key819; + xfer += iprot->readI64(_key819); + MetadataPpdResult& _val820 = this->metadata[_key819]; + xfer += _val820.read(iprot); } xfer += iprot->readMapEnd(); } @@ -19049,11 +19960,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter777; - for (_iter777 = this->metadata.begin(); _iter777 != this->metadata.end(); ++_iter777) + std::map ::const_iterator _iter821; + for (_iter821 = this->metadata.begin(); _iter821 != this->metadata.end(); ++_iter821) { - xfer += oprot->writeI64(_iter777->first); - xfer += _iter777->second.write(oprot); + xfer += oprot->writeI64(_iter821->first); + xfer += _iter821->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -19074,13 +19985,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other778) { - metadata = other778.metadata; - isSupported = other778.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other822) { + metadata = other822.metadata; + isSupported = other822.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other779) { - metadata = other779.metadata; - isSupported = other779.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other823) { + metadata = other823.metadata; + isSupported = other823.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -19141,14 +20052,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size780; - ::apache::thrift::protocol::TType _etype783; - xfer += iprot->readListBegin(_etype783, _size780); - this->fileIds.resize(_size780); - uint32_t _i784; - for (_i784 = 0; _i784 < _size780; ++_i784) + uint32_t _size824; + ::apache::thrift::protocol::TType _etype827; + xfer += iprot->readListBegin(_etype827, _size824); + this->fileIds.resize(_size824); + uint32_t _i828; + for (_i828 = 0; _i828 < _size824; ++_i828) { - xfer += iprot->readI64(this->fileIds[_i784]); + xfer += iprot->readI64(this->fileIds[_i828]); } xfer += iprot->readListEnd(); } @@ -19175,9 +20086,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast785; - xfer += iprot->readI32(ecast785); - this->type = (FileMetadataExprType::type)ecast785; + int32_t ecast829; + xfer += iprot->readI32(ecast829); + this->type = (FileMetadataExprType::type)ecast829; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -19207,10 +20118,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter786; - for (_iter786 = this->fileIds.begin(); _iter786 != this->fileIds.end(); ++_iter786) + std::vector ::const_iterator _iter830; + for (_iter830 = this->fileIds.begin(); _iter830 != this->fileIds.end(); ++_iter830) { - xfer += oprot->writeI64((*_iter786)); + xfer += oprot->writeI64((*_iter830)); } xfer += oprot->writeListEnd(); } @@ -19244,19 +20155,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other787) { - fileIds = other787.fileIds; - expr = other787.expr; - doGetFooters = other787.doGetFooters; - type = other787.type; - __isset = other787.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other831) { + fileIds = other831.fileIds; + expr = other831.expr; + doGetFooters = other831.doGetFooters; + type = other831.type; + __isset = other831.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other788) { - fileIds = other788.fileIds; - expr = other788.expr; - doGetFooters = other788.doGetFooters; - type = other788.type; - __isset = other788.__isset; +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other832) { + fileIds = other832.fileIds; + expr = other832.expr; + doGetFooters = other832.doGetFooters; + type = other832.type; + __isset = other832.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -19309,17 +20220,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size789; - ::apache::thrift::protocol::TType _ktype790; - ::apache::thrift::protocol::TType _vtype791; - xfer += iprot->readMapBegin(_ktype790, _vtype791, _size789); - uint32_t _i793; - for (_i793 = 0; _i793 < _size789; ++_i793) + uint32_t _size833; + ::apache::thrift::protocol::TType _ktype834; + ::apache::thrift::protocol::TType _vtype835; + xfer += iprot->readMapBegin(_ktype834, _vtype835, _size833); + uint32_t _i837; + for (_i837 = 0; _i837 < _size833; ++_i837) { - int64_t _key794; - xfer += iprot->readI64(_key794); - std::string& _val795 = this->metadata[_key794]; - xfer += iprot->readBinary(_val795); + int64_t _key838; + xfer += iprot->readI64(_key838); + std::string& _val839 = this->metadata[_key838]; + xfer += iprot->readBinary(_val839); } xfer += iprot->readMapEnd(); } @@ -19360,11 +20271,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter796; - for (_iter796 = this->metadata.begin(); _iter796 != this->metadata.end(); ++_iter796) + std::map ::const_iterator _iter840; + for (_iter840 = this->metadata.begin(); _iter840 != this->metadata.end(); ++_iter840) { - xfer += oprot->writeI64(_iter796->first); - xfer += oprot->writeBinary(_iter796->second); + xfer += oprot->writeI64(_iter840->first); + xfer += oprot->writeBinary(_iter840->second); } xfer += oprot->writeMapEnd(); } @@ -19385,13 +20296,13 @@ void 
swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other797) { - metadata = other797.metadata; - isSupported = other797.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other841) { + metadata = other841.metadata; + isSupported = other841.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other798) { - metadata = other798.metadata; - isSupported = other798.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other842) { + metadata = other842.metadata; + isSupported = other842.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -19437,14 +20348,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size799; - ::apache::thrift::protocol::TType _etype802; - xfer += iprot->readListBegin(_etype802, _size799); - this->fileIds.resize(_size799); - uint32_t _i803; - for (_i803 = 0; _i803 < _size799; ++_i803) + uint32_t _size843; + ::apache::thrift::protocol::TType _etype846; + xfer += iprot->readListBegin(_etype846, _size843); + this->fileIds.resize(_size843); + uint32_t _i847; + for (_i847 = 0; _i847 < _size843; ++_i847) { - xfer += iprot->readI64(this->fileIds[_i803]); + xfer += iprot->readI64(this->fileIds[_i847]); } xfer += iprot->readListEnd(); } @@ -19475,10 +20386,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter804; - for (_iter804 = this->fileIds.begin(); _iter804 != this->fileIds.end(); ++_iter804) + std::vector ::const_iterator _iter848; + for (_iter848 = this->fileIds.begin(); _iter848 != this->fileIds.end(); ++_iter848) { - xfer += oprot->writeI64((*_iter804)); + xfer += oprot->writeI64((*_iter848)); } xfer += oprot->writeListEnd(); } @@ -19494,11 +20405,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other805) { - fileIds = other805.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other849) { + fileIds = other849.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other806) { - fileIds = other806.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other850) { + fileIds = other850.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -19557,11 +20468,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other807) { - (void) other807; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other851) { + (void) other851; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other808) { - (void) other808; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other852) { + (void) other852; return *this; } void 
PutFileMetadataResult::printTo(std::ostream& out) const { @@ -19615,14 +20526,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size809; - ::apache::thrift::protocol::TType _etype812; - xfer += iprot->readListBegin(_etype812, _size809); - this->fileIds.resize(_size809); - uint32_t _i813; - for (_i813 = 0; _i813 < _size809; ++_i813) + uint32_t _size853; + ::apache::thrift::protocol::TType _etype856; + xfer += iprot->readListBegin(_etype856, _size853); + this->fileIds.resize(_size853); + uint32_t _i857; + for (_i857 = 0; _i857 < _size853; ++_i857) { - xfer += iprot->readI64(this->fileIds[_i813]); + xfer += iprot->readI64(this->fileIds[_i857]); } xfer += iprot->readListEnd(); } @@ -19635,14 +20546,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size814; - ::apache::thrift::protocol::TType _etype817; - xfer += iprot->readListBegin(_etype817, _size814); - this->metadata.resize(_size814); - uint32_t _i818; - for (_i818 = 0; _i818 < _size814; ++_i818) + uint32_t _size858; + ::apache::thrift::protocol::TType _etype861; + xfer += iprot->readListBegin(_etype861, _size858); + this->metadata.resize(_size858); + uint32_t _i862; + for (_i862 = 0; _i862 < _size858; ++_i862) { - xfer += iprot->readBinary(this->metadata[_i818]); + xfer += iprot->readBinary(this->metadata[_i862]); } xfer += iprot->readListEnd(); } @@ -19653,9 +20564,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast819; - xfer += iprot->readI32(ecast819); - this->type = (FileMetadataExprType::type)ecast819; + int32_t ecast863; + xfer += iprot->readI32(ecast863); + this->type = (FileMetadataExprType::type)ecast863; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -19685,10 +20596,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter820; - for (_iter820 = this->fileIds.begin(); _iter820 != this->fileIds.end(); ++_iter820) + std::vector ::const_iterator _iter864; + for (_iter864 = this->fileIds.begin(); _iter864 != this->fileIds.end(); ++_iter864) { - xfer += oprot->writeI64((*_iter820)); + xfer += oprot->writeI64((*_iter864)); } xfer += oprot->writeListEnd(); } @@ -19697,10 +20608,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter821; - for (_iter821 = this->metadata.begin(); _iter821 != this->metadata.end(); ++_iter821) + std::vector ::const_iterator _iter865; + for (_iter865 = this->metadata.begin(); _iter865 != this->metadata.end(); ++_iter865) { - xfer += oprot->writeBinary((*_iter821)); + xfer += oprot->writeBinary((*_iter865)); } xfer += oprot->writeListEnd(); } @@ -19724,17 +20635,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const 
PutFileMetadataRequest& other822) { - fileIds = other822.fileIds; - metadata = other822.metadata; - type = other822.type; - __isset = other822.__isset; +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other866) { + fileIds = other866.fileIds; + metadata = other866.metadata; + type = other866.type; + __isset = other866.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other823) { - fileIds = other823.fileIds; - metadata = other823.metadata; - type = other823.type; - __isset = other823.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other867) { + fileIds = other867.fileIds; + metadata = other867.metadata; + type = other867.type; + __isset = other867.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -19795,11 +20706,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other824) { - (void) other824; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other868) { + (void) other868; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other825) { - (void) other825; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other869) { + (void) other869; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -19843,14 +20754,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size826; - ::apache::thrift::protocol::TType _etype829; - xfer += iprot->readListBegin(_etype829, _size826); - this->fileIds.resize(_size826); - uint32_t _i830; - for (_i830 = 0; _i830 < _size826; ++_i830) + uint32_t _size870; + ::apache::thrift::protocol::TType _etype873; + xfer += iprot->readListBegin(_etype873, _size870); + this->fileIds.resize(_size870); + uint32_t _i874; + for (_i874 = 0; _i874 < _size870; ++_i874) { - xfer += iprot->readI64(this->fileIds[_i830]); + xfer += iprot->readI64(this->fileIds[_i874]); } xfer += iprot->readListEnd(); } @@ -19881,10 +20792,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter831; - for (_iter831 = this->fileIds.begin(); _iter831 != this->fileIds.end(); ++_iter831) + std::vector ::const_iterator _iter875; + for (_iter875 = this->fileIds.begin(); _iter875 != this->fileIds.end(); ++_iter875) { - xfer += oprot->writeI64((*_iter831)); + xfer += oprot->writeI64((*_iter875)); } xfer += oprot->writeListEnd(); } @@ -19900,11 +20811,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other832) { - fileIds = other832.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other876) { + fileIds = other876.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other833) { - fileIds = other833.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const 
ClearFileMetadataRequest& other877) { + fileIds = other877.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -19986,11 +20897,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other834) { - isSupported = other834.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other878) { + isSupported = other878.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other835) { - isSupported = other835.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other879) { + isSupported = other879.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -20131,19 +21042,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other836) { - dbName = other836.dbName; - tblName = other836.tblName; - partName = other836.partName; - isAllParts = other836.isAllParts; - __isset = other836.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other880) { + dbName = other880.dbName; + tblName = other880.tblName; + partName = other880.partName; + isAllParts = other880.isAllParts; + __isset = other880.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other837) { - dbName = other837.dbName; - tblName = other837.tblName; - partName = other837.partName; - isAllParts = other837.isAllParts; - __isset = other837.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other881) { + dbName = other881.dbName; + tblName = other881.tblName; + partName = other881.partName; + isAllParts = other881.isAllParts; + __isset = other881.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -20191,14 +21102,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size838; - ::apache::thrift::protocol::TType _etype841; - xfer += iprot->readListBegin(_etype841, _size838); - this->functions.resize(_size838); - uint32_t _i842; - for (_i842 = 0; _i842 < _size838; ++_i842) + uint32_t _size882; + ::apache::thrift::protocol::TType _etype885; + xfer += iprot->readListBegin(_etype885, _size882); + this->functions.resize(_size882); + uint32_t _i886; + for (_i886 = 0; _i886 < _size882; ++_i886) { - xfer += this->functions[_i842].read(iprot); + xfer += this->functions[_i886].read(iprot); } xfer += iprot->readListEnd(); } @@ -20228,10 +21139,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter843; - for (_iter843 = this->functions.begin(); _iter843 != this->functions.end(); ++_iter843) + std::vector ::const_iterator _iter887; + for (_iter887 = this->functions.begin(); _iter887 != this->functions.end(); ++_iter887) { - xfer += (*_iter843).write(oprot); + xfer += (*_iter887).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -20248,13 +21159,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other844) { - functions = other844.functions; - __isset = other844.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other888) { + functions = other888.functions; + __isset = other888.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other845) { - functions = other845.functions; - __isset = other845.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other889) { + functions = other889.functions; + __isset = other889.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -20299,16 +21210,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - this->values.resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size890; + ::apache::thrift::protocol::TType _etype893; + xfer += iprot->readListBegin(_etype893, _size890); + this->values.resize(_size890); + uint32_t _i894; + for (_i894 = 0; _i894 < _size890; ++_i894) { - int32_t ecast851; - xfer += iprot->readI32(ecast851); - this->values[_i850] = (ClientCapability::type)ecast851; + int32_t ecast895; + xfer += iprot->readI32(ecast895); + this->values[_i894] = (ClientCapability::type)ecast895; } xfer += iprot->readListEnd(); } @@ -20339,10 +21250,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->values.size())); - std::vector ::const_iterator _iter852; - for (_iter852 = this->values.begin(); _iter852 != this->values.end(); ++_iter852) + std::vector ::const_iterator _iter896; + for (_iter896 = this->values.begin(); _iter896 != this->values.end(); ++_iter896) { - xfer += oprot->writeI32((int32_t)(*_iter852)); + xfer += oprot->writeI32((int32_t)(*_iter896)); } xfer += oprot->writeListEnd(); } @@ -20358,11 +21269,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) { swap(a.values, b.values); } -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other853) { - values = other853.values; +ClientCapabilities::ClientCapabilities(const ClientCapabilities& other897) { + values = other897.values; } -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other854) { - values = other854.values; +ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other898) { + values = other898.values; return *this; } void ClientCapabilities::printTo(std::ostream& out) const { @@ -20484,17 +21395,17 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.__isset, b.__isset); } -GetTableRequest::GetTableRequest(const GetTableRequest& other855) { - dbName = other855.dbName; - tblName = other855.tblName; - capabilities = other855.capabilities; - __isset = other855.__isset; +GetTableRequest::GetTableRequest(const GetTableRequest& other899) { + dbName = other899.dbName; + tblName = other899.tblName; + capabilities = other899.capabilities; + __isset 
= other899.__isset; } -GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other856) { - dbName = other856.dbName; - tblName = other856.tblName; - capabilities = other856.capabilities; - __isset = other856.__isset; +GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other900) { + dbName = other900.dbName; + tblName = other900.tblName; + capabilities = other900.capabilities; + __isset = other900.__isset; return *this; } void GetTableRequest::printTo(std::ostream& out) const { @@ -20578,11 +21489,11 @@ void swap(GetTableResult &a, GetTableResult &b) { swap(a.table, b.table); } -GetTableResult::GetTableResult(const GetTableResult& other857) { - table = other857.table; +GetTableResult::GetTableResult(const GetTableResult& other901) { + table = other901.table; } -GetTableResult& GetTableResult::operator=(const GetTableResult& other858) { - table = other858.table; +GetTableResult& GetTableResult::operator=(const GetTableResult& other902) { + table = other902.table; return *this; } void GetTableResult::printTo(std::ostream& out) const { @@ -20645,14 +21556,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblNames.clear(); - uint32_t _size859; - ::apache::thrift::protocol::TType _etype862; - xfer += iprot->readListBegin(_etype862, _size859); - this->tblNames.resize(_size859); - uint32_t _i863; - for (_i863 = 0; _i863 < _size859; ++_i863) + uint32_t _size903; + ::apache::thrift::protocol::TType _etype906; + xfer += iprot->readListBegin(_etype906, _size903); + this->tblNames.resize(_size903); + uint32_t _i907; + for (_i907 = 0; _i907 < _size903; ++_i907) { - xfer += iprot->readString(this->tblNames[_i863]); + xfer += iprot->readString(this->tblNames[_i907]); } xfer += iprot->readListEnd(); } @@ -20696,10 +21607,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tblNames.size())); - std::vector ::const_iterator _iter864; - for (_iter864 = this->tblNames.begin(); _iter864 != this->tblNames.end(); ++_iter864) + std::vector ::const_iterator _iter908; + for (_iter908 = this->tblNames.begin(); _iter908 != this->tblNames.end(); ++_iter908) { - xfer += oprot->writeString((*_iter864)); + xfer += oprot->writeString((*_iter908)); } xfer += oprot->writeListEnd(); } @@ -20723,17 +21634,17 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) { swap(a.__isset, b.__isset); } -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other865) { - dbName = other865.dbName; - tblNames = other865.tblNames; - capabilities = other865.capabilities; - __isset = other865.__isset; +GetTablesRequest::GetTablesRequest(const GetTablesRequest& other909) { + dbName = other909.dbName; + tblNames = other909.tblNames; + capabilities = other909.capabilities; + __isset = other909.__isset; } -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other866) { - dbName = other866.dbName; - tblNames = other866.tblNames; - capabilities = other866.capabilities; - __isset = other866.__isset; +GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other910) { + dbName = other910.dbName; + tblNames = other910.tblNames; + capabilities = other910.capabilities; + __isset = other910.__isset; return *this; } void GetTablesRequest::printTo(std::ostream& out) const { @@ -20780,14 +21691,14 
@@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tables.clear(); - uint32_t _size867; - ::apache::thrift::protocol::TType _etype870; - xfer += iprot->readListBegin(_etype870, _size867); - this->tables.resize(_size867); - uint32_t _i871; - for (_i871 = 0; _i871 < _size867; ++_i871) + uint32_t _size911; + ::apache::thrift::protocol::TType _etype914; + xfer += iprot->readListBegin(_etype914, _size911); + this->tables.resize(_size911); + uint32_t _i915; + for (_i915 = 0; _i915 < _size911; ++_i915) { - xfer += this->tables[_i871].read(iprot); + xfer += this->tables[_i915].read(iprot); } xfer += iprot->readListEnd(); } @@ -20818,10 +21729,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tables.size())); - std::vector
<Table> ::const_iterator _iter872; - for (_iter872 = this->tables.begin(); _iter872 != this->tables.end(); ++_iter872) + std::vector<Table>
::const_iterator _iter916; + for (_iter916 = this->tables.begin(); _iter916 != this->tables.end(); ++_iter916) { - xfer += (*_iter872).write(oprot); + xfer += (*_iter916).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20837,11 +21748,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) { swap(a.tables, b.tables); } -GetTablesResult::GetTablesResult(const GetTablesResult& other873) { - tables = other873.tables; +GetTablesResult::GetTablesResult(const GetTablesResult& other917) { + tables = other917.tables; } -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other874) { - tables = other874.tables; +GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other918) { + tables = other918.tables; return *this; } void GetTablesResult::printTo(std::ostream& out) const { @@ -20943,13 +21854,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) { swap(a.purge, b.purge); } -CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other875) { - dataPath = other875.dataPath; - purge = other875.purge; +CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other919) { + dataPath = other919.dataPath; + purge = other919.purge; } -CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other876) { - dataPath = other876.dataPath; - purge = other876.purge; +CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other920) { + dataPath = other920.dataPath; + purge = other920.purge; return *this; } void CmRecycleRequest::printTo(std::ostream& out) const { @@ -21009,11 +21920,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) { (void) b; } -CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other877) { - (void) other877; +CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other921) { + (void) other921; } -CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other878) { - (void) other878; +CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other922) { + (void) other922; return *this; } void CmRecycleResponse::printTo(std::ostream& out) const { @@ -21154,19 +22065,19 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other879) { - dbName = other879.dbName; - tableName = other879.tableName; - tableType = other879.tableType; - comments = other879.comments; - __isset = other879.__isset; +TableMeta::TableMeta(const TableMeta& other923) { + dbName = other923.dbName; + tableName = other923.tableName; + tableType = other923.tableType; + comments = other923.comments; + __isset = other923.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other880) { - dbName = other880.dbName; - tableName = other880.tableName; - tableType = other880.tableType; - comments = other880.comments; - __isset = other880.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other924) { + dbName = other924.dbName; + tableName = other924.tableName; + tableType = other924.tableType; + comments = other924.comments; + __isset = other924.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -21232,15 +22143,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size881; - ::apache::thrift::protocol::TType _etype884; - xfer += iprot->readSetBegin(_etype884, _size881); - uint32_t _i885; - for (_i885 = 0; _i885 < _size881; ++_i885) + uint32_t _size925; + ::apache::thrift::protocol::TType 
_etype928; + xfer += iprot->readSetBegin(_etype928, _size925); + uint32_t _i929; + for (_i929 = 0; _i929 < _size925; ++_i929) { - std::string _elem886; - xfer += iprot->readString(_elem886); - this->tablesUsed.insert(_elem886); + std::string _elem930; + xfer += iprot->readString(_elem930); + this->tablesUsed.insert(_elem930); } xfer += iprot->readSetEnd(); } @@ -21287,10 +22198,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter887; - for (_iter887 = this->tablesUsed.begin(); _iter887 != this->tablesUsed.end(); ++_iter887) + std::set ::const_iterator _iter931; + for (_iter931 = this->tablesUsed.begin(); _iter931 != this->tablesUsed.end(); ++_iter931) { - xfer += oprot->writeString((*_iter887)); + xfer += oprot->writeString((*_iter931)); } xfer += oprot->writeSetEnd(); } @@ -21312,15 +22223,15 @@ void swap(Materialization &a, Materialization &b) { swap(a.invalidationTime, b.invalidationTime); } -Materialization::Materialization(const Materialization& other888) { - materializationTable = other888.materializationTable; - tablesUsed = other888.tablesUsed; - invalidationTime = other888.invalidationTime; +Materialization::Materialization(const Materialization& other932) { + materializationTable = other932.materializationTable; + tablesUsed = other932.tablesUsed; + invalidationTime = other932.invalidationTime; } -Materialization& Materialization::operator=(const Materialization& other889) { - materializationTable = other889.materializationTable; - tablesUsed = other889.tablesUsed; - invalidationTime = other889.invalidationTime; +Materialization& Materialization::operator=(const Materialization& other933) { + materializationTable = other933.materializationTable; + tablesUsed = other933.tablesUsed; + invalidationTime = other933.invalidationTime; return *this; } void Materialization::printTo(std::ostream& out) const { @@ -21388,9 +22299,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast890; - xfer += iprot->readI32(ecast890); - this->status = (WMResourcePlanStatus::type)ecast890; + int32_t ecast934; + xfer += iprot->readI32(ecast934); + this->status = (WMResourcePlanStatus::type)ecast934; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -21464,19 +22375,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) { swap(a.__isset, b.__isset); } -WMResourcePlan::WMResourcePlan(const WMResourcePlan& other891) { - name = other891.name; - status = other891.status; - queryParallelism = other891.queryParallelism; - defaultPoolPath = other891.defaultPoolPath; - __isset = other891.__isset; +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other935) { + name = other935.name; + status = other935.status; + queryParallelism = other935.queryParallelism; + defaultPoolPath = other935.defaultPoolPath; + __isset = other935.__isset; } -WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other892) { - name = other892.name; - status = other892.status; - queryParallelism = other892.queryParallelism; - defaultPoolPath = other892.defaultPoolPath; - __isset = other892.__isset; +WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other936) { + name = other936.name; + status = other936.status; + queryParallelism = 
other936.queryParallelism; + defaultPoolPath = other936.defaultPoolPath; + __isset = other936.__isset; return *this; } void WMResourcePlan::printTo(std::ostream& out) const { @@ -21555,9 +22466,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast893; - xfer += iprot->readI32(ecast893); - this->status = (WMResourcePlanStatus::type)ecast893; + int32_t ecast937; + xfer += iprot->readI32(ecast937); + this->status = (WMResourcePlanStatus::type)ecast937; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -21659,23 +22570,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) { swap(a.__isset, b.__isset); } -WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other894) { - name = other894.name; - status = other894.status; - queryParallelism = other894.queryParallelism; - isSetQueryParallelism = other894.isSetQueryParallelism; - defaultPoolPath = other894.defaultPoolPath; - isSetDefaultPoolPath = other894.isSetDefaultPoolPath; - __isset = other894.__isset; -} -WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other895) { - name = other895.name; - status = other895.status; - queryParallelism = other895.queryParallelism; - isSetQueryParallelism = other895.isSetQueryParallelism; - defaultPoolPath = other895.defaultPoolPath; - isSetDefaultPoolPath = other895.isSetDefaultPoolPath; - __isset = other895.__isset; +WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other938) { + name = other938.name; + status = other938.status; + queryParallelism = other938.queryParallelism; + isSetQueryParallelism = other938.isSetQueryParallelism; + defaultPoolPath = other938.defaultPoolPath; + isSetDefaultPoolPath = other938.isSetDefaultPoolPath; + __isset = other938.__isset; +} +WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other939) { + name = other939.name; + status = other939.status; + queryParallelism = other939.queryParallelism; + isSetQueryParallelism = other939.isSetQueryParallelism; + defaultPoolPath = other939.defaultPoolPath; + isSetDefaultPoolPath = other939.isSetDefaultPoolPath; + __isset = other939.__isset; return *this; } void WMNullableResourcePlan::printTo(std::ostream& out) const { @@ -21840,21 +22751,21 @@ void swap(WMPool &a, WMPool &b) { swap(a.__isset, b.__isset); } -WMPool::WMPool(const WMPool& other896) { - resourcePlanName = other896.resourcePlanName; - poolPath = other896.poolPath; - allocFraction = other896.allocFraction; - queryParallelism = other896.queryParallelism; - schedulingPolicy = other896.schedulingPolicy; - __isset = other896.__isset; -} -WMPool& WMPool::operator=(const WMPool& other897) { - resourcePlanName = other897.resourcePlanName; - poolPath = other897.poolPath; - allocFraction = other897.allocFraction; - queryParallelism = other897.queryParallelism; - schedulingPolicy = other897.schedulingPolicy; - __isset = other897.__isset; +WMPool::WMPool(const WMPool& other940) { + resourcePlanName = other940.resourcePlanName; + poolPath = other940.poolPath; + allocFraction = other940.allocFraction; + queryParallelism = other940.queryParallelism; + schedulingPolicy = other940.schedulingPolicy; + __isset = other940.__isset; +} +WMPool& WMPool::operator=(const WMPool& other941) { + resourcePlanName = other941.resourcePlanName; + poolPath = other941.poolPath; + allocFraction = other941.allocFraction; + queryParallelism 
= other941.queryParallelism; + schedulingPolicy = other941.schedulingPolicy; + __isset = other941.__isset; return *this; } void WMPool::printTo(std::ostream& out) const { @@ -22037,23 +22948,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) { swap(a.__isset, b.__isset); } -WMNullablePool::WMNullablePool(const WMNullablePool& other898) { - resourcePlanName = other898.resourcePlanName; - poolPath = other898.poolPath; - allocFraction = other898.allocFraction; - queryParallelism = other898.queryParallelism; - schedulingPolicy = other898.schedulingPolicy; - isSetSchedulingPolicy = other898.isSetSchedulingPolicy; - __isset = other898.__isset; -} -WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other899) { - resourcePlanName = other899.resourcePlanName; - poolPath = other899.poolPath; - allocFraction = other899.allocFraction; - queryParallelism = other899.queryParallelism; - schedulingPolicy = other899.schedulingPolicy; - isSetSchedulingPolicy = other899.isSetSchedulingPolicy; - __isset = other899.__isset; +WMNullablePool::WMNullablePool(const WMNullablePool& other942) { + resourcePlanName = other942.resourcePlanName; + poolPath = other942.poolPath; + allocFraction = other942.allocFraction; + queryParallelism = other942.queryParallelism; + schedulingPolicy = other942.schedulingPolicy; + isSetSchedulingPolicy = other942.isSetSchedulingPolicy; + __isset = other942.__isset; +} +WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other943) { + resourcePlanName = other943.resourcePlanName; + poolPath = other943.poolPath; + allocFraction = other943.allocFraction; + queryParallelism = other943.queryParallelism; + schedulingPolicy = other943.schedulingPolicy; + isSetSchedulingPolicy = other943.isSetSchedulingPolicy; + __isset = other943.__isset; return *this; } void WMNullablePool::printTo(std::ostream& out) const { @@ -22218,21 +23129,21 @@ void swap(WMTrigger &a, WMTrigger &b) { swap(a.__isset, b.__isset); } -WMTrigger::WMTrigger(const WMTrigger& other900) { - resourcePlanName = other900.resourcePlanName; - triggerName = other900.triggerName; - triggerExpression = other900.triggerExpression; - actionExpression = other900.actionExpression; - isInUnmanaged = other900.isInUnmanaged; - __isset = other900.__isset; -} -WMTrigger& WMTrigger::operator=(const WMTrigger& other901) { - resourcePlanName = other901.resourcePlanName; - triggerName = other901.triggerName; - triggerExpression = other901.triggerExpression; - actionExpression = other901.actionExpression; - isInUnmanaged = other901.isInUnmanaged; - __isset = other901.__isset; +WMTrigger::WMTrigger(const WMTrigger& other944) { + resourcePlanName = other944.resourcePlanName; + triggerName = other944.triggerName; + triggerExpression = other944.triggerExpression; + actionExpression = other944.actionExpression; + isInUnmanaged = other944.isInUnmanaged; + __isset = other944.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other945) { + resourcePlanName = other945.resourcePlanName; + triggerName = other945.triggerName; + triggerExpression = other945.triggerExpression; + actionExpression = other945.actionExpression; + isInUnmanaged = other945.isInUnmanaged; + __isset = other945.__isset; return *this; } void WMTrigger::printTo(std::ostream& out) const { @@ -22397,21 +23308,21 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.__isset, b.__isset); } -WMMapping::WMMapping(const WMMapping& other902) { - resourcePlanName = other902.resourcePlanName; - entityType = other902.entityType; - entityName = other902.entityName; - 
poolPath = other902.poolPath; - ordering = other902.ordering; - __isset = other902.__isset; -} -WMMapping& WMMapping::operator=(const WMMapping& other903) { - resourcePlanName = other903.resourcePlanName; - entityType = other903.entityType; - entityName = other903.entityName; - poolPath = other903.poolPath; - ordering = other903.ordering; - __isset = other903.__isset; +WMMapping::WMMapping(const WMMapping& other946) { + resourcePlanName = other946.resourcePlanName; + entityType = other946.entityType; + entityName = other946.entityName; + poolPath = other946.poolPath; + ordering = other946.ordering; + __isset = other946.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other947) { + resourcePlanName = other947.resourcePlanName; + entityType = other947.entityType; + entityName = other947.entityName; + poolPath = other947.poolPath; + ordering = other947.ordering; + __isset = other947.__isset; return *this; } void WMMapping::printTo(std::ostream& out) const { @@ -22517,13 +23428,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) { swap(a.trigger, b.trigger); } -WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other904) { - pool = other904.pool; - trigger = other904.trigger; +WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other948) { + pool = other948.pool; + trigger = other948.trigger; } -WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other905) { - pool = other905.pool; - trigger = other905.trigger; +WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other949) { + pool = other949.pool; + trigger = other949.trigger; return *this; } void WMPoolTrigger::printTo(std::ostream& out) const { @@ -22597,14 +23508,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->pools.clear(); - uint32_t _size906; - ::apache::thrift::protocol::TType _etype909; - xfer += iprot->readListBegin(_etype909, _size906); - this->pools.resize(_size906); - uint32_t _i910; - for (_i910 = 0; _i910 < _size906; ++_i910) + uint32_t _size950; + ::apache::thrift::protocol::TType _etype953; + xfer += iprot->readListBegin(_etype953, _size950); + this->pools.resize(_size950); + uint32_t _i954; + for (_i954 = 0; _i954 < _size950; ++_i954) { - xfer += this->pools[_i910].read(iprot); + xfer += this->pools[_i954].read(iprot); } xfer += iprot->readListEnd(); } @@ -22617,14 +23528,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->mappings.clear(); - uint32_t _size911; - ::apache::thrift::protocol::TType _etype914; - xfer += iprot->readListBegin(_etype914, _size911); - this->mappings.resize(_size911); - uint32_t _i915; - for (_i915 = 0; _i915 < _size911; ++_i915) + uint32_t _size955; + ::apache::thrift::protocol::TType _etype958; + xfer += iprot->readListBegin(_etype958, _size955); + this->mappings.resize(_size955); + uint32_t _i959; + for (_i959 = 0; _i959 < _size955; ++_i959) { - xfer += this->mappings[_i915].read(iprot); + xfer += this->mappings[_i959].read(iprot); } xfer += iprot->readListEnd(); } @@ -22637,14 +23548,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size916; - ::apache::thrift::protocol::TType _etype919; - xfer += iprot->readListBegin(_etype919, _size916); - this->triggers.resize(_size916); - uint32_t _i920; - for (_i920 = 0; _i920 < _size916; ++_i920) + uint32_t _size960; + 
::apache::thrift::protocol::TType _etype963; + xfer += iprot->readListBegin(_etype963, _size960); + this->triggers.resize(_size960); + uint32_t _i964; + for (_i964 = 0; _i964 < _size960; ++_i964) { - xfer += this->triggers[_i920].read(iprot); + xfer += this->triggers[_i964].read(iprot); } xfer += iprot->readListEnd(); } @@ -22657,14 +23568,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->poolTriggers.clear(); - uint32_t _size921; - ::apache::thrift::protocol::TType _etype924; - xfer += iprot->readListBegin(_etype924, _size921); - this->poolTriggers.resize(_size921); - uint32_t _i925; - for (_i925 = 0; _i925 < _size921; ++_i925) + uint32_t _size965; + ::apache::thrift::protocol::TType _etype968; + xfer += iprot->readListBegin(_etype968, _size965); + this->poolTriggers.resize(_size965); + uint32_t _i969; + for (_i969 = 0; _i969 < _size965; ++_i969) { - xfer += this->poolTriggers[_i925].read(iprot); + xfer += this->poolTriggers[_i969].read(iprot); } xfer += iprot->readListEnd(); } @@ -22701,10 +23612,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->pools.size())); - std::vector ::const_iterator _iter926; - for (_iter926 = this->pools.begin(); _iter926 != this->pools.end(); ++_iter926) + std::vector ::const_iterator _iter970; + for (_iter970 = this->pools.begin(); _iter970 != this->pools.end(); ++_iter970) { - xfer += (*_iter926).write(oprot); + xfer += (*_iter970).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22714,10 +23625,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->mappings.size())); - std::vector ::const_iterator _iter927; - for (_iter927 = this->mappings.begin(); _iter927 != this->mappings.end(); ++_iter927) + std::vector ::const_iterator _iter971; + for (_iter971 = this->mappings.begin(); _iter971 != this->mappings.end(); ++_iter971) { - xfer += (*_iter927).write(oprot); + xfer += (*_iter971).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22727,10 +23638,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter928; - for (_iter928 = this->triggers.begin(); _iter928 != this->triggers.end(); ++_iter928) + std::vector ::const_iterator _iter972; + for (_iter972 = this->triggers.begin(); _iter972 != this->triggers.end(); ++_iter972) { - xfer += (*_iter928).write(oprot); + xfer += (*_iter972).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22740,10 +23651,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->poolTriggers.size())); - std::vector ::const_iterator _iter929; - for (_iter929 = this->poolTriggers.begin(); _iter929 != this->poolTriggers.end(); ++_iter929) + std::vector ::const_iterator _iter973; + for 
(_iter973 = this->poolTriggers.begin(); _iter973 != this->poolTriggers.end(); ++_iter973) { - xfer += (*_iter929).write(oprot); + xfer += (*_iter973).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22764,21 +23675,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) { swap(a.__isset, b.__isset); } -WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other930) { - plan = other930.plan; - pools = other930.pools; - mappings = other930.mappings; - triggers = other930.triggers; - poolTriggers = other930.poolTriggers; - __isset = other930.__isset; -} -WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other931) { - plan = other931.plan; - pools = other931.pools; - mappings = other931.mappings; - triggers = other931.triggers; - poolTriggers = other931.poolTriggers; - __isset = other931.__isset; +WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other974) { + plan = other974.plan; + pools = other974.pools; + mappings = other974.mappings; + triggers = other974.triggers; + poolTriggers = other974.poolTriggers; + __isset = other974.__isset; +} +WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other975) { + plan = other975.plan; + pools = other975.pools; + mappings = other975.mappings; + triggers = other975.triggers; + poolTriggers = other975.poolTriggers; + __isset = other975.__isset; return *this; } void WMFullResourcePlan::printTo(std::ostream& out) const { @@ -22883,15 +23794,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other932) { - resourcePlan = other932.resourcePlan; - copyFrom = other932.copyFrom; - __isset = other932.__isset; +WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other976) { + resourcePlan = other976.resourcePlan; + copyFrom = other976.copyFrom; + __isset = other976.__isset; } -WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other933) { - resourcePlan = other933.resourcePlan; - copyFrom = other933.copyFrom; - __isset = other933.__isset; +WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other977) { + resourcePlan = other977.resourcePlan; + copyFrom = other977.copyFrom; + __isset = other977.__isset; return *this; } void WMCreateResourcePlanRequest::printTo(std::ostream& out) const { @@ -22951,11 +23862,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) { (void) b; } -WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other934) { - (void) other934; +WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other978) { + (void) other978; } -WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other935) { - (void) other935; +WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other979) { + (void) other979; return *this; } void WMCreateResourcePlanResponse::printTo(std::ostream& out) const { @@ -23013,11 +23924,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b) (void) b; } -WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other936) { - (void) other936; 
+WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other980) { + (void) other980; } -WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other937) { - (void) other937; +WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other981) { + (void) other981; return *this; } void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const { @@ -23098,13 +24009,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b swap(a.__isset, b.__isset); } -WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other938) { - resourcePlan = other938.resourcePlan; - __isset = other938.__isset; +WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other982) { + resourcePlan = other982.resourcePlan; + __isset = other982.__isset; } -WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other939) { - resourcePlan = other939.resourcePlan; - __isset = other939.__isset; +WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other983) { + resourcePlan = other983.resourcePlan; + __isset = other983.__isset; return *this; } void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const { @@ -23186,13 +24097,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other940) { - resourcePlanName = other940.resourcePlanName; - __isset = other940.__isset; +WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other984) { + resourcePlanName = other984.resourcePlanName; + __isset = other984.__isset; } -WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other941) { - resourcePlanName = other941.resourcePlanName; - __isset = other941.__isset; +WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other985) { + resourcePlanName = other985.resourcePlanName; + __isset = other985.__isset; return *this; } void WMGetResourcePlanRequest::printTo(std::ostream& out) const { @@ -23274,13 +24185,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other942) { - resourcePlan = other942.resourcePlan; - __isset = other942.__isset; +WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other986) { + resourcePlan = other986.resourcePlan; + __isset = other986.__isset; } -WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other943) { - resourcePlan = other943.resourcePlan; - __isset = other943.__isset; +WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other987) { + resourcePlan = other987.resourcePlan; + __isset = other987.__isset; return *this; } void WMGetResourcePlanResponse::printTo(std::ostream& out) const { @@ -23339,11 +24250,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) { (void) b; } -WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& 
other944) { - (void) other944; +WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other988) { + (void) other988; } -WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other945) { - (void) other945; +WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other989) { + (void) other989; return *this; } void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const { @@ -23387,14 +24298,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourcePlans.clear(); - uint32_t _size946; - ::apache::thrift::protocol::TType _etype949; - xfer += iprot->readListBegin(_etype949, _size946); - this->resourcePlans.resize(_size946); - uint32_t _i950; - for (_i950 = 0; _i950 < _size946; ++_i950) + uint32_t _size990; + ::apache::thrift::protocol::TType _etype993; + xfer += iprot->readListBegin(_etype993, _size990); + this->resourcePlans.resize(_size990); + uint32_t _i994; + for (_i994 = 0; _i994 < _size990; ++_i994) { - xfer += this->resourcePlans[_i950].read(iprot); + xfer += this->resourcePlans[_i994].read(iprot); } xfer += iprot->readListEnd(); } @@ -23424,10 +24335,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourcePlans.size())); - std::vector ::const_iterator _iter951; - for (_iter951 = this->resourcePlans.begin(); _iter951 != this->resourcePlans.end(); ++_iter951) + std::vector ::const_iterator _iter995; + for (_iter995 = this->resourcePlans.begin(); _iter995 != this->resourcePlans.end(); ++_iter995) { - xfer += (*_iter951).write(oprot); + xfer += (*_iter995).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23444,13 +24355,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other952) { - resourcePlans = other952.resourcePlans; - __isset = other952.__isset; +WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other996) { + resourcePlans = other996.resourcePlans; + __isset = other996.__isset; } -WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other953) { - resourcePlans = other953.resourcePlans; - __isset = other953.__isset; +WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other997) { + resourcePlans = other997.resourcePlans; + __isset = other997.__isset; return *this; } void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const { @@ -23608,21 +24519,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other954) { - resourcePlanName = other954.resourcePlanName; - resourcePlan = other954.resourcePlan; - isEnableAndActivate = other954.isEnableAndActivate; - isForceDeactivate = other954.isForceDeactivate; - isReplace = other954.isReplace; - __isset = other954.__isset; -} -WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other955) { - 
resourcePlanName = other955.resourcePlanName; - resourcePlan = other955.resourcePlan; - isEnableAndActivate = other955.isEnableAndActivate; - isForceDeactivate = other955.isForceDeactivate; - isReplace = other955.isReplace; - __isset = other955.__isset; +WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other998) { + resourcePlanName = other998.resourcePlanName; + resourcePlan = other998.resourcePlan; + isEnableAndActivate = other998.isEnableAndActivate; + isForceDeactivate = other998.isForceDeactivate; + isReplace = other998.isReplace; + __isset = other998.__isset; +} +WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other999) { + resourcePlanName = other999.resourcePlanName; + resourcePlan = other999.resourcePlan; + isEnableAndActivate = other999.isEnableAndActivate; + isForceDeactivate = other999.isForceDeactivate; + isReplace = other999.isReplace; + __isset = other999.__isset; return *this; } void WMAlterResourcePlanRequest::printTo(std::ostream& out) const { @@ -23708,13 +24619,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other956) { - fullResourcePlan = other956.fullResourcePlan; - __isset = other956.__isset; +WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1000) { + fullResourcePlan = other1000.fullResourcePlan; + __isset = other1000.__isset; } -WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other957) { - fullResourcePlan = other957.fullResourcePlan; - __isset = other957.__isset; +WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1001) { + fullResourcePlan = other1001.fullResourcePlan; + __isset = other1001.__isset; return *this; } void WMAlterResourcePlanResponse::printTo(std::ostream& out) const { @@ -23796,13 +24707,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other958) { - resourcePlanName = other958.resourcePlanName; - __isset = other958.__isset; +WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1002) { + resourcePlanName = other1002.resourcePlanName; + __isset = other1002.__isset; } -WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other959) { - resourcePlanName = other959.resourcePlanName; - __isset = other959.__isset; +WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1003) { + resourcePlanName = other1003.resourcePlanName; + __isset = other1003.__isset; return *this; } void WMValidateResourcePlanRequest::printTo(std::ostream& out) const { @@ -23852,14 +24763,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->errors.clear(); - uint32_t _size960; - ::apache::thrift::protocol::TType _etype963; - xfer += iprot->readListBegin(_etype963, _size960); - this->errors.resize(_size960); - uint32_t _i964; - for (_i964 = 0; _i964 < _size960; ++_i964) + uint32_t _size1004; + ::apache::thrift::protocol::TType _etype1007; + xfer += 
iprot->readListBegin(_etype1007, _size1004); + this->errors.resize(_size1004); + uint32_t _i1008; + for (_i1008 = 0; _i1008 < _size1004; ++_i1008) { - xfer += iprot->readString(this->errors[_i964]); + xfer += iprot->readString(this->errors[_i1008]); } xfer += iprot->readListEnd(); } @@ -23872,14 +24783,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->warnings.clear(); - uint32_t _size965; - ::apache::thrift::protocol::TType _etype968; - xfer += iprot->readListBegin(_etype968, _size965); - this->warnings.resize(_size965); - uint32_t _i969; - for (_i969 = 0; _i969 < _size965; ++_i969) + uint32_t _size1009; + ::apache::thrift::protocol::TType _etype1012; + xfer += iprot->readListBegin(_etype1012, _size1009); + this->warnings.resize(_size1009); + uint32_t _i1013; + for (_i1013 = 0; _i1013 < _size1009; ++_i1013) { - xfer += iprot->readString(this->warnings[_i969]); + xfer += iprot->readString(this->warnings[_i1013]); } xfer += iprot->readListEnd(); } @@ -23909,10 +24820,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->errors.size())); - std::vector ::const_iterator _iter970; - for (_iter970 = this->errors.begin(); _iter970 != this->errors.end(); ++_iter970) + std::vector ::const_iterator _iter1014; + for (_iter1014 = this->errors.begin(); _iter1014 != this->errors.end(); ++_iter1014) { - xfer += oprot->writeString((*_iter970)); + xfer += oprot->writeString((*_iter1014)); } xfer += oprot->writeListEnd(); } @@ -23922,10 +24833,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->warnings.size())); - std::vector ::const_iterator _iter971; - for (_iter971 = this->warnings.begin(); _iter971 != this->warnings.end(); ++_iter971) + std::vector ::const_iterator _iter1015; + for (_iter1015 = this->warnings.begin(); _iter1015 != this->warnings.end(); ++_iter1015) { - xfer += oprot->writeString((*_iter971)); + xfer += oprot->writeString((*_iter1015)); } xfer += oprot->writeListEnd(); } @@ -23943,15 +24854,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b) swap(a.__isset, b.__isset); } -WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other972) { - errors = other972.errors; - warnings = other972.warnings; - __isset = other972.__isset; +WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1016) { + errors = other1016.errors; + warnings = other1016.warnings; + __isset = other1016.__isset; } -WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other973) { - errors = other973.errors; - warnings = other973.warnings; - __isset = other973.__isset; +WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1017) { + errors = other1017.errors; + warnings = other1017.warnings; + __isset = other1017.__isset; return *this; } void WMValidateResourcePlanResponse::printTo(std::ostream& out) const { @@ -24034,13 +24945,13 @@ void swap(WMDropResourcePlanRequest &a, 
WMDropResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other974) { - resourcePlanName = other974.resourcePlanName; - __isset = other974.__isset; +WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1018) { + resourcePlanName = other1018.resourcePlanName; + __isset = other1018.__isset; } -WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other975) { - resourcePlanName = other975.resourcePlanName; - __isset = other975.__isset; +WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1019) { + resourcePlanName = other1019.resourcePlanName; + __isset = other1019.__isset; return *this; } void WMDropResourcePlanRequest::printTo(std::ostream& out) const { @@ -24099,11 +25010,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) { (void) b; } -WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other976) { - (void) other976; +WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1020) { + (void) other1020; } -WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other977) { - (void) other977; +WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1021) { + (void) other1021; return *this; } void WMDropResourcePlanResponse::printTo(std::ostream& out) const { @@ -24184,13 +25095,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other978) { - trigger = other978.trigger; - __isset = other978.__isset; +WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1022) { + trigger = other1022.trigger; + __isset = other1022.__isset; } -WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other979) { - trigger = other979.trigger; - __isset = other979.__isset; +WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1023) { + trigger = other1023.trigger; + __isset = other1023.__isset; return *this; } void WMCreateTriggerRequest::printTo(std::ostream& out) const { @@ -24249,11 +25160,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) { (void) b; } -WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other980) { - (void) other980; +WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1024) { + (void) other1024; } -WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other981) { - (void) other981; +WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1025) { + (void) other1025; return *this; } void WMCreateTriggerResponse::printTo(std::ostream& out) const { @@ -24334,13 +25245,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other982) { - trigger = other982.trigger; - __isset = other982.__isset; +WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1026) { + trigger = other1026.trigger; + __isset = other1026.__isset; } -WMAlterTriggerRequest& 
WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other983) { - trigger = other983.trigger; - __isset = other983.__isset; +WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1027) { + trigger = other1027.trigger; + __isset = other1027.__isset; return *this; } void WMAlterTriggerRequest::printTo(std::ostream& out) const { @@ -24399,11 +25310,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) { (void) b; } -WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other984) { - (void) other984; +WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1028) { + (void) other1028; } -WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other985) { - (void) other985; +WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1029) { + (void) other1029; return *this; } void WMAlterTriggerResponse::printTo(std::ostream& out) const { @@ -24503,15 +25414,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other986) { - resourcePlanName = other986.resourcePlanName; - triggerName = other986.triggerName; - __isset = other986.__isset; +WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1030) { + resourcePlanName = other1030.resourcePlanName; + triggerName = other1030.triggerName; + __isset = other1030.__isset; } -WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other987) { - resourcePlanName = other987.resourcePlanName; - triggerName = other987.triggerName; - __isset = other987.__isset; +WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1031) { + resourcePlanName = other1031.resourcePlanName; + triggerName = other1031.triggerName; + __isset = other1031.__isset; return *this; } void WMDropTriggerRequest::printTo(std::ostream& out) const { @@ -24571,11 +25482,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) { (void) b; } -WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other988) { - (void) other988; +WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1032) { + (void) other1032; } -WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other989) { - (void) other989; +WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1033) { + (void) other1033; return *this; } void WMDropTriggerResponse::printTo(std::ostream& out) const { @@ -24656,13 +25567,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other990) { - resourcePlanName = other990.resourcePlanName; - __isset = other990.__isset; +WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1034) { + resourcePlanName = other1034.resourcePlanName; + __isset = other1034.__isset; } -WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other991) { - resourcePlanName = other991.resourcePlanName; - __isset = other991.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const 
WMGetTriggersForResourePlanRequest& other1035) { + resourcePlanName = other1035.resourcePlanName; + __isset = other1035.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -24707,14 +25618,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size992; - ::apache::thrift::protocol::TType _etype995; - xfer += iprot->readListBegin(_etype995, _size992); - this->triggers.resize(_size992); - uint32_t _i996; - for (_i996 = 0; _i996 < _size992; ++_i996) + uint32_t _size1036; + ::apache::thrift::protocol::TType _etype1039; + xfer += iprot->readListBegin(_etype1039, _size1036); + this->triggers.resize(_size1036); + uint32_t _i1040; + for (_i1040 = 0; _i1040 < _size1036; ++_i1040) { - xfer += this->triggers[_i996].read(iprot); + xfer += this->triggers[_i1040].read(iprot); } xfer += iprot->readListEnd(); } @@ -24744,10 +25655,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter997; - for (_iter997 = this->triggers.begin(); _iter997 != this->triggers.end(); ++_iter997) + std::vector ::const_iterator _iter1041; + for (_iter1041 = this->triggers.begin(); _iter1041 != this->triggers.end(); ++_iter1041) { - xfer += (*_iter997).write(oprot); + xfer += (*_iter1041).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24764,13 +25675,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other998) { - triggers = other998.triggers; - __isset = other998.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1042) { + triggers = other1042.triggers; + __isset = other1042.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other999) { - triggers = other999.triggers; - __isset = other999.__isset; +WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1043) { + triggers = other1043.triggers; + __isset = other1043.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -24852,13 +25763,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1000) { - pool = other1000.pool; - __isset = other1000.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1044) { + pool = other1044.pool; + __isset = other1044.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1001) { - pool = other1001.pool; - __isset = other1001.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1045) { + pool = other1045.pool; + __isset = other1045.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -24917,11 +25828,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } 
-WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1002) { - (void) other1002; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1046) { + (void) other1046; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1003) { - (void) other1003; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1047) { + (void) other1047; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -25021,15 +25932,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1004) { - pool = other1004.pool; - poolPath = other1004.poolPath; - __isset = other1004.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1048) { + pool = other1048.pool; + poolPath = other1048.poolPath; + __isset = other1048.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1005) { - pool = other1005.pool; - poolPath = other1005.poolPath; - __isset = other1005.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1049) { + pool = other1049.pool; + poolPath = other1049.poolPath; + __isset = other1049.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -25089,11 +26000,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } -WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1006) { - (void) other1006; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1050) { + (void) other1050; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1007) { - (void) other1007; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1051) { + (void) other1051; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const { @@ -25193,15 +26104,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1008) { - resourcePlanName = other1008.resourcePlanName; - poolPath = other1008.poolPath; - __isset = other1008.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1052) { + resourcePlanName = other1052.resourcePlanName; + poolPath = other1052.poolPath; + __isset = other1052.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1009) { - resourcePlanName = other1009.resourcePlanName; - poolPath = other1009.poolPath; - __isset = other1009.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1053) { + resourcePlanName = other1053.resourcePlanName; + poolPath = other1053.poolPath; + __isset = other1053.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -25261,11 +26172,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1010) { - (void) other1010; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1054) { + (void) other1054; } -WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1011) { - (void) other1011; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1055) { + (void) other1055; return *this; } void 
WMDropPoolResponse::printTo(std::ostream& out) const { @@ -25365,15 +26276,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1012) { - mapping = other1012.mapping; - update = other1012.update; - __isset = other1012.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1056) { + mapping = other1056.mapping; + update = other1056.update; + __isset = other1056.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1013) { - mapping = other1013.mapping; - update = other1013.update; - __isset = other1013.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1057) { + mapping = other1057.mapping; + update = other1057.update; + __isset = other1057.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -25433,11 +26344,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1014) { - (void) other1014; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1058) { + (void) other1058; } -WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1015) { - (void) other1015; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1059) { + (void) other1059; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -25518,13 +26429,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1016) { - mapping = other1016.mapping; - __isset = other1016.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1060) { + mapping = other1060.mapping; + __isset = other1060.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1017) { - mapping = other1017.mapping; - __isset = other1017.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1061) { + mapping = other1061.mapping; + __isset = other1061.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -25583,11 +26494,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1018) { - (void) other1018; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1062) { + (void) other1062; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1019) { - (void) other1019; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1063) { + (void) other1063; return *this; } void WMDropMappingResponse::printTo(std::ostream& out) const { @@ -25725,19 +26636,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } 
-WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1020) { - resourcePlanName = other1020.resourcePlanName; - triggerName = other1020.triggerName; - poolPath = other1020.poolPath; - drop = other1020.drop; - __isset = other1020.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1064) { + resourcePlanName = other1064.resourcePlanName; + triggerName = other1064.triggerName; + poolPath = other1064.poolPath; + drop = other1064.drop; + __isset = other1064.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1021) { - resourcePlanName = other1021.resourcePlanName; - triggerName = other1021.triggerName; - poolPath = other1021.poolPath; - drop = other1021.drop; - __isset = other1021.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1065) { + resourcePlanName = other1065.resourcePlanName; + triggerName = other1065.triggerName; + poolPath = other1065.poolPath; + drop = other1065.drop; + __isset = other1065.__isset; return *this; } void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -25799,11 +26710,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } -WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1022) { - (void) other1022; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1066) { + (void) other1066; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1023) { - (void) other1023; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1067) { + (void) other1067; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -25882,13 +26793,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1024) : TException() { - message = other1024.message; - __isset = other1024.__isset; +MetaException::MetaException(const MetaException& other1068) : TException() { + message = other1068.message; + __isset = other1068.__isset; } -MetaException& MetaException::operator=(const MetaException& other1025) { - message = other1025.message; - __isset = other1025.__isset; +MetaException& MetaException::operator=(const MetaException& other1069) { + message = other1069.message; + __isset = other1069.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -25979,13 +26890,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1026) : TException() { - message = other1026.message; - __isset = other1026.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1070) : TException() { + message = other1070.message; + __isset = other1070.__isset; } 
-UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1027) { - message = other1027.message; - __isset = other1027.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1071) { + message = other1071.message; + __isset = other1071.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -26076,13 +26987,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1028) : TException() { - message = other1028.message; - __isset = other1028.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1072) : TException() { + message = other1072.message; + __isset = other1072.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1029) { - message = other1029.message; - __isset = other1029.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1073) { + message = other1073.message; + __isset = other1073.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -26173,13 +27084,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1030) : TException() { - message = other1030.message; - __isset = other1030.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1074) : TException() { + message = other1074.message; + __isset = other1074.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1031) { - message = other1031.message; - __isset = other1031.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1075) { + message = other1075.message; + __isset = other1075.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -26270,13 +27181,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1032) : TException() { - message = other1032.message; - __isset = other1032.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1076) : TException() { + message = other1076.message; + __isset = other1076.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1033) { - message = other1033.message; - __isset = other1033.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1077) { + message = other1077.message; + __isset = other1077.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -26367,13 +27278,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1034) : TException() { - message = other1034.message; - __isset = other1034.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1078) : TException() { + message = other1078.message; + __isset = other1078.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const 
UnknownPartitionException& other1035) { - message = other1035.message; - __isset = other1035.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1079) { + message = other1079.message; + __isset = other1079.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -26464,13 +27375,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1036) : TException() { - message = other1036.message; - __isset = other1036.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1080) : TException() { + message = other1080.message; + __isset = other1080.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1037) { - message = other1037.message; - __isset = other1037.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1081) { + message = other1081.message; + __isset = other1081.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -26561,13 +27472,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1038) : TException() { - message = other1038.message; - __isset = other1038.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1082) : TException() { + message = other1082.message; + __isset = other1082.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1039) { - message = other1039.message; - __isset = other1039.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1083) { + message = other1083.message; + __isset = other1083.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -26658,13 +27569,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1040) : TException() { - message = other1040.message; - __isset = other1040.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1084) : TException() { + message = other1084.message; + __isset = other1084.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1041) { - message = other1041.message; - __isset = other1041.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1085) { + message = other1085.message; + __isset = other1085.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -26755,13 +27666,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1042) : TException() { - message = other1042.message; - __isset = other1042.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1086) : TException() { + message = other1086.message; + __isset = other1086.__isset; } -InvalidOperationException& 
InvalidOperationException::operator=(const InvalidOperationException& other1043) { - message = other1043.message; - __isset = other1043.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1087) { + message = other1087.message; + __isset = other1087.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -26852,13 +27763,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1044) : TException() { - message = other1044.message; - __isset = other1044.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1088) : TException() { + message = other1088.message; + __isset = other1088.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1045) { - message = other1045.message; - __isset = other1045.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1089) { + message = other1089.message; + __isset = other1089.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -26949,13 +27860,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1046) : TException() { - message = other1046.message; - __isset = other1046.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1090) : TException() { + message = other1090.message; + __isset = other1090.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1047) { - message = other1047.message; - __isset = other1047.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1091) { + message = other1091.message; + __isset = other1091.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -27046,13 +27957,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1048) : TException() { - message = other1048.message; - __isset = other1048.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1092) : TException() { + message = other1092.message; + __isset = other1092.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1049) { - message = other1049.message; - __isset = other1049.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1093) { + message = other1093.message; + __isset = other1093.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -27143,13 +28054,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1050) : TException() { - message = other1050.message; - __isset = other1050.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1094) : TException() { + message = other1094.message; + __isset = other1094.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1051) { - message = other1051.message; - 
__isset = other1051.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1095) { + message = other1095.message; + __isset = other1095.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -27240,13 +28151,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1052) : TException() { - message = other1052.message; - __isset = other1052.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1096) : TException() { + message = other1096.message; + __isset = other1096.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1053) { - message = other1053.message; - __isset = other1053.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1097) { + message = other1097.message; + __isset = other1097.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -27337,13 +28248,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1054) : TException() { - message = other1054.message; - __isset = other1054.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1098) : TException() { + message = other1098.message; + __isset = other1098.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1055) { - message = other1055.message; - __isset = other1055.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1099) { + message = other1099.message; + __isset = other1099.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index df646a7..1ae169c 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -364,6 +364,20 @@ class AbortTxnsRequest; class CommitTxnRequest; +class GetOpenWriteIdsRequest; + +class OpenWriteIds; + +class GetOpenWriteIdsResponse; + +class AddTransactionalTableRequest; + +class AllocateTableWriteIdRequest; + +class TxnToWriteId; + +class AllocateTableWriteIdResponse; + class LockComponent; class LockRequest; @@ -5954,6 +5968,339 @@ inline std::ostream& operator<<(std::ostream& out, const CommitTxnRequest& obj) return out; } + +class GetOpenWriteIdsRequest { + public: + + GetOpenWriteIdsRequest(const GetOpenWriteIdsRequest&); + GetOpenWriteIdsRequest& operator=(const GetOpenWriteIdsRequest&); + GetOpenWriteIdsRequest() : currentTxnId(0) { + } + + virtual ~GetOpenWriteIdsRequest() throw(); + int64_t currentTxnId; + std::vector tableNames; + + void __set_currentTxnId(const int64_t val); + + void __set_tableNames(const std::vector & val); + + bool operator == (const GetOpenWriteIdsRequest & rhs) const + { + if (!(currentTxnId == rhs.currentTxnId)) + return false; + if (!(tableNames == rhs.tableNames)) + return false; + return true; + } + bool operator != (const GetOpenWriteIdsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetOpenWriteIdsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + 
+ virtual void printTo(std::ostream& out) const; +}; + +void swap(GetOpenWriteIdsRequest &a, GetOpenWriteIdsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetOpenWriteIdsRequest& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _OpenWriteIds__isset { + _OpenWriteIds__isset() : minWriteId(false) {} + bool minWriteId :1; +} _OpenWriteIds__isset; + +class OpenWriteIds { + public: + + OpenWriteIds(const OpenWriteIds&); + OpenWriteIds& operator=(const OpenWriteIds&); + OpenWriteIds() : tableName(), writeIdHighWaterMark(0), minWriteId(0), abortedBits() { + } + + virtual ~OpenWriteIds() throw(); + std::string tableName; + int64_t writeIdHighWaterMark; + std::vector openWriteIds; + int64_t minWriteId; + std::string abortedBits; + + _OpenWriteIds__isset __isset; + + void __set_tableName(const std::string& val); + + void __set_writeIdHighWaterMark(const int64_t val); + + void __set_openWriteIds(const std::vector & val); + + void __set_minWriteId(const int64_t val); + + void __set_abortedBits(const std::string& val); + + bool operator == (const OpenWriteIds & rhs) const + { + if (!(tableName == rhs.tableName)) + return false; + if (!(writeIdHighWaterMark == rhs.writeIdHighWaterMark)) + return false; + if (!(openWriteIds == rhs.openWriteIds)) + return false; + if (__isset.minWriteId != rhs.__isset.minWriteId) + return false; + else if (__isset.minWriteId && !(minWriteId == rhs.minWriteId)) + return false; + if (!(abortedBits == rhs.abortedBits)) + return false; + return true; + } + bool operator != (const OpenWriteIds &rhs) const { + return !(*this == rhs); + } + + bool operator < (const OpenWriteIds & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(OpenWriteIds &a, OpenWriteIds &b); + +inline std::ostream& operator<<(std::ostream& out, const OpenWriteIds& obj) +{ + obj.printTo(out); + return out; +} + + +class GetOpenWriteIdsResponse { + public: + + GetOpenWriteIdsResponse(const GetOpenWriteIdsResponse&); + GetOpenWriteIdsResponse& operator=(const GetOpenWriteIdsResponse&); + GetOpenWriteIdsResponse() { + } + + virtual ~GetOpenWriteIdsResponse() throw(); + std::vector openWriteIds; + + void __set_openWriteIds(const std::vector & val); + + bool operator == (const GetOpenWriteIdsResponse & rhs) const + { + if (!(openWriteIds == rhs.openWriteIds)) + return false; + return true; + } + bool operator != (const GetOpenWriteIdsResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetOpenWriteIdsResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetOpenWriteIdsResponse &a, GetOpenWriteIdsResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const GetOpenWriteIdsResponse& obj) +{ + obj.printTo(out); + return out; +} + + +class AddTransactionalTableRequest { + public: + + AddTransactionalTableRequest(const AddTransactionalTableRequest&); + AddTransactionalTableRequest& operator=(const AddTransactionalTableRequest&); + AddTransactionalTableRequest() : dbName(), tableName() { + } + + virtual ~AddTransactionalTableRequest() throw(); + std::string dbName; + std::string tableName; + + void __set_dbName(const std::string& val); + + void __set_tableName(const std::string& val); + + bool operator == (const 
AddTransactionalTableRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tableName == rhs.tableName)) + return false; + return true; + } + bool operator != (const AddTransactionalTableRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AddTransactionalTableRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AddTransactionalTableRequest &a, AddTransactionalTableRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const AddTransactionalTableRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class AllocateTableWriteIdRequest { + public: + + AllocateTableWriteIdRequest(const AllocateTableWriteIdRequest&); + AllocateTableWriteIdRequest& operator=(const AllocateTableWriteIdRequest&); + AllocateTableWriteIdRequest() : dbName(), tableName() { + } + + virtual ~AllocateTableWriteIdRequest() throw(); + std::vector txnIds; + std::string dbName; + std::string tableName; + + void __set_txnIds(const std::vector & val); + + void __set_dbName(const std::string& val); + + void __set_tableName(const std::string& val); + + bool operator == (const AllocateTableWriteIdRequest & rhs) const + { + if (!(txnIds == rhs.txnIds)) + return false; + if (!(dbName == rhs.dbName)) + return false; + if (!(tableName == rhs.tableName)) + return false; + return true; + } + bool operator != (const AllocateTableWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AllocateTableWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AllocateTableWriteIdRequest &a, AllocateTableWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class TxnToWriteId { + public: + + TxnToWriteId(const TxnToWriteId&); + TxnToWriteId& operator=(const TxnToWriteId&); + TxnToWriteId() : txnId(0), writeId(0) { + } + + virtual ~TxnToWriteId() throw(); + int64_t txnId; + int64_t writeId; + + void __set_txnId(const int64_t val); + + void __set_writeId(const int64_t val); + + bool operator == (const TxnToWriteId & rhs) const + { + if (!(txnId == rhs.txnId)) + return false; + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const TxnToWriteId &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TxnToWriteId & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(TxnToWriteId &a, TxnToWriteId &b); + +inline std::ostream& operator<<(std::ostream& out, const TxnToWriteId& obj) +{ + obj.printTo(out); + return out; +} + + +class AllocateTableWriteIdResponse { + public: + + AllocateTableWriteIdResponse(const AllocateTableWriteIdResponse&); + AllocateTableWriteIdResponse& operator=(const AllocateTableWriteIdResponse&); + AllocateTableWriteIdResponse() { + } + + virtual ~AllocateTableWriteIdResponse() throw(); + std::vector txnToWriteIds; + + void __set_txnToWriteIds(const std::vector & val); + + bool operator == (const AllocateTableWriteIdResponse & rhs) const + { + if (!(txnToWriteIds == rhs.txnToWriteIds)) + 
return false; + return true; + } + bool operator != (const AllocateTableWriteIdResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AllocateTableWriteIdResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AllocateTableWriteIdResponse &a, AllocateTableWriteIdResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdResponse& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _LockComponent__isset { _LockComponent__isset() : tablename(false), partitionname(false), operationType(true), isAcid(true), isDynamicPartitionWrite(true) {} bool tablename :1; diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 2102aa5..eef05fe 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -727,13 +727,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 4: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list616 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list616.size); - String _elem617; - for (int _i618 = 0; _i618 < _list616.size; ++_i618) + org.apache.thrift.protocol.TList _list656 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list656.size); + String _elem657; + for (int _i658 = 0; _i658 < _list656.size; ++_i658) { - _elem617 = iprot.readString(); - struct.partitionnames.add(_elem617); + _elem657 = iprot.readString(); + struct.partitionnames.add(_elem657); } iprot.readListEnd(); } @@ -780,9 +780,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter619 : struct.partitionnames) + for (String _iter659 : struct.partitionnames) { - oprot.writeString(_iter619); + oprot.writeString(_iter659); } oprot.writeListEnd(); } @@ -817,9 +817,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter620 : struct.partitionnames) + for (String _iter660 : struct.partitionnames) { - oprot.writeString(_iter620); + oprot.writeString(_iter660); } } BitSet optionals = new BitSet(); @@ -842,13 +842,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list621 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list621.size); - String _elem622; - for (int _i623 = 0; _i623 < _list621.size; ++_i623) + org.apache.thrift.protocol.TList _list661 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new 
ArrayList(_list661.size); + String _elem662; + for (int _i663 = 0; _i663 < _list661.size; ++_i663) { - _elem622 = iprot.readString(); - struct.partitionnames.add(_elem622); + _elem662 = iprot.readString(); + struct.partitionnames.add(_elem662); } } struct.setPartitionnamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddTransactionalTableRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddTransactionalTableRequest.java new file mode 100644 index 0000000..6bdf19e --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddTransactionalTableRequest.java @@ -0,0 +1,490 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddTransactionalTableRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddTransactionalTableRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AddTransactionalTableRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AddTransactionalTableRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tableName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TABLE_NAME((short)2, "tableName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TABLE_NAME + return TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddTransactionalTableRequest.class, metaDataMap); + } + + public AddTransactionalTableRequest() { + } + + public AddTransactionalTableRequest( + String dbName, + String tableName) + { + this(); + this.dbName = dbName; + this.tableName = tableName; + } + + /** + * Performs a deep copy on other. 
+ */ + public AddTransactionalTableRequest(AddTransactionalTableRequest other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + } + + public AddTransactionalTableRequest deepCopy() { + return new AddTransactionalTableRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tableName = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AddTransactionalTableRequest) + return this.equals((AddTransactionalTableRequest)that); + return false; + } + + public boolean equals(AddTransactionalTableRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + return 
list.hashCode(); + } + + @Override + public int compareTo(AddTransactionalTableRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AddTransactionalTableRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AddTransactionalTableRequestStandardSchemeFactory implements SchemeFactory { + public AddTransactionalTableRequestStandardScheme getScheme() { + return new AddTransactionalTableRequestStandardScheme(); + } + } + + private static class AddTransactionalTableRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AddTransactionalTableRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AddTransactionalTableRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AddTransactionalTableRequestTupleSchemeFactory implements SchemeFactory { + public AddTransactionalTableRequestTupleScheme getScheme() { + return new AddTransactionalTableRequestTupleScheme(); + } + } + + private static class AddTransactionalTableRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AddTransactionalTableRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AddTransactionalTableRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); 
+ struct.setTableNameIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java new file mode 100644 index 0000000..5ea7354 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdRequest.java @@ -0,0 +1,640 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnIds", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AllocateTableWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AllocateTableWriteIdRequestTupleSchemeFactory()); + } + + private List txnIds; // required + private String dbName; // required + private String tableName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_IDS((short)1, "txnIds"), + DB_NAME((short)2, "dbName"), + TABLE_NAME((short)3, "tableName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_IDS + return TXN_IDS; + case 2: // DB_NAME + return DB_NAME; + case 3: // TABLE_NAME + return TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdRequest.class, metaDataMap); + } + + public AllocateTableWriteIdRequest() { + } + + public AllocateTableWriteIdRequest( + List txnIds, + String dbName, + String tableName) + { + this(); + this.txnIds = txnIds; + this.dbName = dbName; + this.tableName = tableName; + } + + /** + * Performs a deep copy on other. 
+ */ + public AllocateTableWriteIdRequest(AllocateTableWriteIdRequest other) { + if (other.isSetTxnIds()) { + List __this__txnIds = new ArrayList(other.txnIds); + this.txnIds = __this__txnIds; + } + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + } + + public AllocateTableWriteIdRequest deepCopy() { + return new AllocateTableWriteIdRequest(this); + } + + @Override + public void clear() { + this.txnIds = null; + this.dbName = null; + this.tableName = null; + } + + public int getTxnIdsSize() { + return (this.txnIds == null) ? 0 : this.txnIds.size(); + } + + public java.util.Iterator getTxnIdsIterator() { + return (this.txnIds == null) ? null : this.txnIds.iterator(); + } + + public void addToTxnIds(long elem) { + if (this.txnIds == null) { + this.txnIds = new ArrayList(); + } + this.txnIds.add(elem); + } + + public List getTxnIds() { + return this.txnIds; + } + + public void setTxnIds(List txnIds) { + this.txnIds = txnIds; + } + + public void unsetTxnIds() { + this.txnIds = null; + } + + /** Returns true if field txnIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnIds() { + return this.txnIds != null; + } + + public void setTxnIdsIsSet(boolean value) { + if (!value) { + this.txnIds = null; + } + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_IDS: + if (value == null) { + unsetTxnIds(); + } else { + setTxnIds((List)value); + } + break; + + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXN_IDS: + return getTxnIds(); + + case DB_NAME: + return getDbName(); + + case TABLE_NAME: + return getTableName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_IDS: + return isSetTxnIds(); + case DB_NAME: + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AllocateTableWriteIdRequest) + return this.equals((AllocateTableWriteIdRequest)that); + return false; 
+ } + + public boolean equals(AllocateTableWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_txnIds = true && this.isSetTxnIds(); + boolean that_present_txnIds = true && that.isSetTxnIds(); + if (this_present_txnIds || that_present_txnIds) { + if (!(this_present_txnIds && that_present_txnIds)) + return false; + if (!this.txnIds.equals(that.txnIds)) + return false; + } + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txnIds = true && (isSetTxnIds()); + list.add(present_txnIds); + if (present_txnIds) + list.add(txnIds); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + return list.hashCode(); + } + + @Override + public int compareTo(AllocateTableWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnIds()).compareTo(other.isSetTxnIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnIds, other.txnIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AllocateTableWriteIdRequest("); + boolean first = true; + + sb.append("txnIds:"); + if (this.txnIds == null) { + sb.append("null"); + } else { + sb.append(this.txnIds); + } + first = false; + if (!first) sb.append(", "); + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); 
+ } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTxnIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnIds' is unset! Struct:" + toString()); + } + + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AllocateTableWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdRequestStandardScheme getScheme() { + return new AllocateTableWriteIdRequestStandardScheme(); + } + } + + private static class AllocateTableWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list590 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list590.size); + long _elem591; + for (int _i592 = 0; _i592 < _list590.size; ++_i592) + { + _elem591 = iprot.readI64(); + struct.txnIds.add(_elem591); + } + iprot.readListEnd(); + } + struct.setTxnIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + 
oprot.writeStructBegin(STRUCT_DESC); + if (struct.txnIds != null) { + oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); + for (long _iter593 : struct.txnIds) + { + oprot.writeI64(_iter593); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AllocateTableWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdRequestTupleScheme getScheme() { + return new AllocateTableWriteIdRequestTupleScheme(); + } + } + + private static class AllocateTableWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.txnIds.size()); + for (long _iter594 : struct.txnIds) + { + oprot.writeI64(_iter594); + } + } + oprot.writeString(struct.dbName); + oprot.writeString(struct.tableName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list595.size); + long _elem596; + for (int _i597 = 0; _i597 < _list595.size; ++_i597) + { + _elem596 = iprot.readI64(); + struct.txnIds.add(_elem596); + } + } + struct.setTxnIdsIsSet(true); + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java new file mode 100644 index 0000000..0969f22 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; 
+import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdResponse"); + + private static final org.apache.thrift.protocol.TField TXN_TO_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnToWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AllocateTableWriteIdResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AllocateTableWriteIdResponseTupleSchemeFactory()); + } + + private List txnToWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_TO_WRITE_IDS((short)1, "txnToWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_TO_WRITE_IDS + return TXN_TO_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_TO_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnToWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TxnToWriteId.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdResponse.class, metaDataMap); + } + + public AllocateTableWriteIdResponse() { + } + + public AllocateTableWriteIdResponse( + List txnToWriteIds) + { + this(); + this.txnToWriteIds = txnToWriteIds; + } + + /** + * Performs a deep copy on other. + */ + public AllocateTableWriteIdResponse(AllocateTableWriteIdResponse other) { + if (other.isSetTxnToWriteIds()) { + List __this__txnToWriteIds = new ArrayList(other.txnToWriteIds.size()); + for (TxnToWriteId other_element : other.txnToWriteIds) { + __this__txnToWriteIds.add(new TxnToWriteId(other_element)); + } + this.txnToWriteIds = __this__txnToWriteIds; + } + } + + public AllocateTableWriteIdResponse deepCopy() { + return new AllocateTableWriteIdResponse(this); + } + + @Override + public void clear() { + this.txnToWriteIds = null; + } + + public int getTxnToWriteIdsSize() { + return (this.txnToWriteIds == null) ? 0 : this.txnToWriteIds.size(); + } + + public java.util.Iterator getTxnToWriteIdsIterator() { + return (this.txnToWriteIds == null) ? 
null : this.txnToWriteIds.iterator(); + } + + public void addToTxnToWriteIds(TxnToWriteId elem) { + if (this.txnToWriteIds == null) { + this.txnToWriteIds = new ArrayList(); + } + this.txnToWriteIds.add(elem); + } + + public List getTxnToWriteIds() { + return this.txnToWriteIds; + } + + public void setTxnToWriteIds(List txnToWriteIds) { + this.txnToWriteIds = txnToWriteIds; + } + + public void unsetTxnToWriteIds() { + this.txnToWriteIds = null; + } + + /** Returns true if field txnToWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnToWriteIds() { + return this.txnToWriteIds != null; + } + + public void setTxnToWriteIdsIsSet(boolean value) { + if (!value) { + this.txnToWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_TO_WRITE_IDS: + if (value == null) { + unsetTxnToWriteIds(); + } else { + setTxnToWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXN_TO_WRITE_IDS: + return getTxnToWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_TO_WRITE_IDS: + return isSetTxnToWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AllocateTableWriteIdResponse) + return this.equals((AllocateTableWriteIdResponse)that); + return false; + } + + public boolean equals(AllocateTableWriteIdResponse that) { + if (that == null) + return false; + + boolean this_present_txnToWriteIds = true && this.isSetTxnToWriteIds(); + boolean that_present_txnToWriteIds = true && that.isSetTxnToWriteIds(); + if (this_present_txnToWriteIds || that_present_txnToWriteIds) { + if (!(this_present_txnToWriteIds && that_present_txnToWriteIds)) + return false; + if (!this.txnToWriteIds.equals(that.txnToWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txnToWriteIds = true && (isSetTxnToWriteIds()); + list.add(present_txnToWriteIds); + if (present_txnToWriteIds) + list.add(txnToWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(AllocateTableWriteIdResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnToWriteIds()).compareTo(other.isSetTxnToWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnToWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnToWriteIds, other.txnToWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("AllocateTableWriteIdResponse("); + boolean first = true; + + sb.append("txnToWriteIds:"); + if (this.txnToWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.txnToWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTxnToWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnToWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AllocateTableWriteIdResponseStandardSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdResponseStandardScheme getScheme() { + return new AllocateTableWriteIdResponseStandardScheme(); + } + } + + private static class AllocateTableWriteIdResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_TO_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list598 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list598.size); + TxnToWriteId _elem599; + for (int _i600 = 0; _i600 < _list598.size; ++_i600) + { + _elem599 = new TxnToWriteId(); + _elem599.read(iprot); + struct.txnToWriteIds.add(_elem599); + } + iprot.readListEnd(); + } + struct.setTxnToWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.txnToWriteIds != null) { + oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); + for (TxnToWriteId _iter601 : struct.txnToWriteIds) + { + _iter601.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AllocateTableWriteIdResponseTupleSchemeFactory implements SchemeFactory { + public AllocateTableWriteIdResponseTupleScheme getScheme() { + return new AllocateTableWriteIdResponseTupleScheme(); + } + } + + private static class 
AllocateTableWriteIdResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.txnToWriteIds.size()); + for (TxnToWriteId _iter602 : struct.txnToWriteIds) + { + _iter602.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list603.size); + TxnToWriteId _elem604; + for (int _i605 = 0; _i605 < _list603.size; ++_i605) + { + _elem604 = new TxnToWriteId(); + _elem604.read(iprot); + struct.txnToWriteIds.add(_elem604); + } + } + struct.setTxnToWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index dbda2ab..647061e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list716.size); - long _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list756.size); + long _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem717 = iprot.readI64(); - struct.fileIds.add(_elem717); + _elem757 = iprot.readI64(); + struct.fileIds.add(_elem757); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter719 : struct.fileIds) + for (long _iter759 : struct.fileIds) { - oprot.writeI64(_iter719); + oprot.writeI64(_iter759); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter720 : struct.fileIds) + for (long _iter760 : struct.fileIds) { - oprot.writeI64(_iter720); + oprot.writeI64(_iter760); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list721.size); - long _elem722; - for (int _i723 = 0; 
_i723 < _list721.size; ++_i723) + org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list761.size); + long _elem762; + for (int _i763 = 0; _i763 < _list761.size; ++_i763) { - _elem722 = iprot.readI64(); - struct.fileIds.add(_elem722); + _elem762 = iprot.readI64(); + struct.fileIds.add(_elem762); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index 0df33f1..4b09811 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); - struct.values = new ArrayList(_list732.size); - ClientCapability _elem733; - for (int _i734 = 0; _i734 < _list732.size; ++_i734) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.values = new ArrayList(_list772.size); + ClientCapability _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem733 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem733); + _elem773 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem773); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter735 : struct.values) + for (ClientCapability _iter775 : struct.values) { - oprot.writeI32(_iter735.getValue()); + oprot.writeI32(_iter775.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter736 : struct.values) + for (ClientCapability _iter776 : struct.values) { - oprot.writeI32(_iter736.getValue()); + oprot.writeI32(_iter776.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list737.size); - ClientCapability _elem738; - for (int _i739 = 0; _i739 < _list737.size; ++_i739) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list777.size); + ClientCapability _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem738 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem738); + _elem778 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem778); } } struct.setValuesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index b92293e..78016bd 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map598 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map598.size); - String _key599; - String _val600; - for (int _i601 = 0; _i601 < _map598.size; ++_i601) + org.apache.thrift.protocol.TMap _map638 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map638.size); + String _key639; + String _val640; + for (int _i641 = 0; _i641 < _map638.size; ++_i641) { - _key599 = iprot.readString(); - _val600 = iprot.readString(); - struct.properties.put(_key599, _val600); + _key639 = iprot.readString(); + _val640 = iprot.readString(); + struct.properties.put(_key639, _val640); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter602 : struct.properties.entrySet()) + for (Map.Entry _iter642 : struct.properties.entrySet()) { - oprot.writeString(_iter602.getKey()); - oprot.writeString(_iter602.getValue()); + oprot.writeString(_iter642.getKey()); + oprot.writeString(_iter642.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter603 : struct.properties.entrySet()) + for (Map.Entry _iter643 : struct.properties.entrySet()) { - oprot.writeString(_iter603.getKey()); - oprot.writeString(_iter603.getValue()); + oprot.writeString(_iter643.getKey()); + oprot.writeString(_iter643.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map604 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map604.size); - String _key605; - String _val606; - for (int _i607 = 0; _i607 < _map604.size; ++_i607) + org.apache.thrift.protocol.TMap _map644 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map644.size); + String _key645; + String _val646; + for (int _i647 = 0; _i647 < _map644.size; ++_i647) { - _key605 = iprot.readString(); - _val606 = iprot.readString(); - 
struct.properties.put(_key605, _val606); + _key645 = iprot.readString(); + _val646 = iprot.readString(); + struct.properties.put(_key645, _val646); } } struct.setPropertiesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 4efec9d..6d7b0a5 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -713,13 +713,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list656 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list656.size); - String _elem657; - for (int _i658 = 0; _i658 < _list656.size; ++_i658) + org.apache.thrift.protocol.TList _list696 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list696.size); + String _elem697; + for (int _i698 = 0; _i698 < _list696.size; ++_i698) { - _elem657 = iprot.readString(); - struct.partitionVals.add(_elem657); + _elem697 = iprot.readString(); + struct.partitionVals.add(_elem697); } iprot.readListEnd(); } @@ -768,9 +768,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter659 : struct.partitionVals) + for (String _iter699 : struct.partitionVals) { - oprot.writeString(_iter659); + oprot.writeString(_iter699); } oprot.writeListEnd(); } @@ -816,9 +816,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter660 : struct.partitionVals) + for (String _iter700 : struct.partitionVals) { - oprot.writeString(_iter660); + oprot.writeString(_iter700); } } } @@ -843,13 +843,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list661 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list661.size); - String _elem662; - for (int _i663 = 0; _i663 < _list661.size; ++_i663) + org.apache.thrift.protocol.TList _list701 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list701.size); + String _elem702; + for (int _i703 = 0; _i703 < _list701.size; ++_i703) { - _elem662 = iprot.readString(); - struct.partitionVals.add(_elem662); + _elem702 = iprot.readString(); + struct.partitionVals.add(_elem702); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index bff424f..5f75507 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); - struct.functions = new ArrayList(_list724.size); - Function _elem725; - for (int _i726 = 0; _i726 < _list724.size; ++_i726) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.functions = new ArrayList(_list764.size); + Function _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem725 = new Function(); - _elem725.read(iprot); - struct.functions.add(_elem725); + _elem765 = new Function(); + _elem765.read(iprot); + struct.functions.add(_elem765); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter727 : struct.functions) + for (Function _iter767 : struct.functions) { - _iter727.write(oprot); + _iter767.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter728 : struct.functions) + for (Function _iter768 : struct.functions) { - _iter728.write(oprot); + _iter768.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list729.size); - Function _elem730; - for (int _i731 = 0; _i731 < _list729.size; ++_i731) + org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list769.size); + Function _elem770; + for (int _i771 = 0; _i771 < _list769.size; ++_i771) { - _elem730 = new Function(); - _elem730.read(iprot); - struct.functions.add(_elem730); + _elem770 = new Function(); + _elem770.read(iprot); + struct.functions.add(_elem770); } } struct.setFunctionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 38a5ed9..c290cb9 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list674.size); - long _elem675; - for (int _i676 = 0; _i676 < _list674.size; ++_i676) + org.apache.thrift.protocol.TList _list714 = 
iprot.readListBegin(); + struct.fileIds = new ArrayList(_list714.size); + long _elem715; + for (int _i716 = 0; _i716 < _list714.size; ++_i716) { - _elem675 = iprot.readI64(); - struct.fileIds.add(_elem675); + _elem715 = iprot.readI64(); + struct.fileIds.add(_elem715); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter677 : struct.fileIds) + for (long _iter717 : struct.fileIds) { - oprot.writeI64(_iter677); + oprot.writeI64(_iter717); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter678 : struct.fileIds) + for (long _iter718 : struct.fileIds) { - oprot.writeI64(_iter678); + oprot.writeI64(_iter718); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list679.size); - long _elem680; - for (int _i681 = 0; _i681 < _list679.size; ++_i681) + org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list719.size); + long _elem720; + for (int _i721 = 0; _i721 < _list719.size; ++_i721) { - _elem680 = iprot.readI64(); - struct.fileIds.add(_elem680); + _elem720 = iprot.readI64(); + struct.fileIds.add(_elem720); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index a3dc743..07e13b9 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map664 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map664.size); - long _key665; - MetadataPpdResult _val666; - for (int _i667 = 0; _i667 < _map664.size; ++_i667) + org.apache.thrift.protocol.TMap _map704 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map704.size); + long _key705; + MetadataPpdResult _val706; + for (int _i707 = 0; _i707 < _map704.size; ++_i707) { - _key665 = iprot.readI64(); - _val666 = new MetadataPpdResult(); - _val666.read(iprot); - struct.metadata.put(_key665, _val666); + _key705 = iprot.readI64(); + _val706 = new MetadataPpdResult(); + _val706.read(iprot); + struct.metadata.put(_key705, _val706); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter668 : struct.metadata.entrySet()) + for (Map.Entry _iter708 : struct.metadata.entrySet()) { - oprot.writeI64(_iter668.getKey()); - _iter668.getValue().write(oprot); + oprot.writeI64(_iter708.getKey()); + _iter708.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter669 : struct.metadata.entrySet()) + for (Map.Entry _iter709 : struct.metadata.entrySet()) { - oprot.writeI64(_iter669.getKey()); - _iter669.getValue().write(oprot); + oprot.writeI64(_iter709.getKey()); + _iter709.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map670 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map670.size); - long _key671; - MetadataPpdResult _val672; - for (int _i673 = 0; _i673 < _map670.size; ++_i673) + org.apache.thrift.protocol.TMap _map710 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map710.size); + long _key711; + MetadataPpdResult _val712; + for (int _i713 = 0; _i713 < _map710.size; ++_i713) { - _key671 = iprot.readI64(); - _val672 = new MetadataPpdResult(); - _val672.read(iprot); - struct.metadata.put(_key671, _val672); + _key711 = iprot.readI64(); + _val712 = new MetadataPpdResult(); + _val712.read(iprot); + struct.metadata.put(_key711, _val712); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 53603af..ac0ab45 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list692.size); - long _elem693; - for (int _i694 = 0; _i694 < _list692.size; ++_i694) + org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list732.size); + long _elem733; + for (int _i734 = 0; _i734 < _list732.size; ++_i734) { - _elem693 = iprot.readI64(); - struct.fileIds.add(_elem693); + _elem733 = iprot.readI64(); + struct.fileIds.add(_elem733); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter695 : struct.fileIds) + for (long _iter735 : struct.fileIds) { - oprot.writeI64(_iter695); + oprot.writeI64(_iter735); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter696 : struct.fileIds) + for (long _iter736 : struct.fileIds) { - oprot.writeI64(_iter696); + oprot.writeI64(_iter736); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list697.size); - long _elem698; - for (int _i699 = 0; _i699 < _list697.size; ++_i699) + org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list737.size); + long _elem738; + for (int _i739 = 0; _i739 < _list737.size; ++_i739) { - _elem698 = iprot.readI64(); - struct.fileIds.add(_elem698); + _elem738 = iprot.readI64(); + struct.fileIds.add(_elem738); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 440965e..952c125 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map682 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map682.size); - long _key683; - ByteBuffer _val684; - for (int _i685 = 0; _i685 < _map682.size; ++_i685) + org.apache.thrift.protocol.TMap _map722 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map722.size); + long _key723; + ByteBuffer _val724; + for (int _i725 = 0; _i725 < _map722.size; ++_i725) { - _key683 = iprot.readI64(); - _val684 = iprot.readBinary(); - struct.metadata.put(_key683, _val684); + _key723 = iprot.readI64(); + _val724 = iprot.readBinary(); + struct.metadata.put(_key723, _val724); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter686 : struct.metadata.entrySet()) + for (Map.Entry _iter726 : struct.metadata.entrySet()) { - oprot.writeI64(_iter686.getKey()); - oprot.writeBinary(_iter686.getValue()); + 
oprot.writeI64(_iter726.getKey()); + oprot.writeBinary(_iter726.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter687 : struct.metadata.entrySet()) + for (Map.Entry _iter727 : struct.metadata.entrySet()) { - oprot.writeI64(_iter687.getKey()); - oprot.writeBinary(_iter687.getValue()); + oprot.writeI64(_iter727.getKey()); + oprot.writeBinary(_iter727.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map688 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map688.size); - long _key689; - ByteBuffer _val690; - for (int _i691 = 0; _i691 < _map688.size; ++_i691) + org.apache.thrift.protocol.TMap _map728 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map728.size); + long _key729; + ByteBuffer _val730; + for (int _i731 = 0; _i731 < _map728.size; ++_i731) { - _key689 = iprot.readI64(); - _val690 = iprot.readBinary(); - struct.metadata.put(_key689, _val690); + _key729 = iprot.readI64(); + _val730 = iprot.readBinary(); + struct.metadata.put(_key729, _val730); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java new file mode 100644 index 0000000..a1ba32b --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsRequest.java @@ -0,0 +1,537 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class 
GetOpenWriteIdsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenWriteIdsRequest"); + + private static final org.apache.thrift.protocol.TField CURRENT_TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("currentTxnId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tableNames", org.apache.thrift.protocol.TType.LIST, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetOpenWriteIdsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetOpenWriteIdsRequestTupleSchemeFactory()); + } + + private long currentTxnId; // required + private List tableNames; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CURRENT_TXN_ID((short)1, "currentTxnId"), + TABLE_NAMES((short)2, "tableNames"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CURRENT_TXN_ID + return CURRENT_TXN_ID; + case 2: // TABLE_NAMES + return TABLE_NAMES; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __CURRENTTXNID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CURRENT_TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("currentTxnId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.TABLE_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tableNames", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenWriteIdsRequest.class, metaDataMap); + } + + public GetOpenWriteIdsRequest() { + } + + public GetOpenWriteIdsRequest( + long currentTxnId, + List tableNames) + { + this(); + this.currentTxnId = currentTxnId; + setCurrentTxnIdIsSet(true); + this.tableNames = tableNames; + } + + /** + * Performs a deep copy on other. + */ + public GetOpenWriteIdsRequest(GetOpenWriteIdsRequest other) { + __isset_bitfield = other.__isset_bitfield; + this.currentTxnId = other.currentTxnId; + if (other.isSetTableNames()) { + List __this__tableNames = new ArrayList(other.tableNames); + this.tableNames = __this__tableNames; + } + } + + public GetOpenWriteIdsRequest deepCopy() { + return new GetOpenWriteIdsRequest(this); + } + + @Override + public void clear() { + setCurrentTxnIdIsSet(false); + this.currentTxnId = 0; + this.tableNames = null; + } + + public long getCurrentTxnId() { + return this.currentTxnId; + } + + public void setCurrentTxnId(long currentTxnId) { + this.currentTxnId = currentTxnId; + setCurrentTxnIdIsSet(true); + } + + public void unsetCurrentTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CURRENTTXNID_ISSET_ID); + } + + /** Returns true if field currentTxnId is set (has been assigned a value) and false otherwise */ + public boolean isSetCurrentTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __CURRENTTXNID_ISSET_ID); + } + + public void setCurrentTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CURRENTTXNID_ISSET_ID, value); + } + + public int getTableNamesSize() { + return (this.tableNames == null) ? 0 : this.tableNames.size(); + } + + public java.util.Iterator getTableNamesIterator() { + return (this.tableNames == null) ? 
null : this.tableNames.iterator(); + } + + public void addToTableNames(String elem) { + if (this.tableNames == null) { + this.tableNames = new ArrayList(); + } + this.tableNames.add(elem); + } + + public List getTableNames() { + return this.tableNames; + } + + public void setTableNames(List tableNames) { + this.tableNames = tableNames; + } + + public void unsetTableNames() { + this.tableNames = null; + } + + /** Returns true if field tableNames is set (has been assigned a value) and false otherwise */ + public boolean isSetTableNames() { + return this.tableNames != null; + } + + public void setTableNamesIsSet(boolean value) { + if (!value) { + this.tableNames = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CURRENT_TXN_ID: + if (value == null) { + unsetCurrentTxnId(); + } else { + setCurrentTxnId((Long)value); + } + break; + + case TABLE_NAMES: + if (value == null) { + unsetTableNames(); + } else { + setTableNames((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CURRENT_TXN_ID: + return getCurrentTxnId(); + + case TABLE_NAMES: + return getTableNames(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CURRENT_TXN_ID: + return isSetCurrentTxnId(); + case TABLE_NAMES: + return isSetTableNames(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetOpenWriteIdsRequest) + return this.equals((GetOpenWriteIdsRequest)that); + return false; + } + + public boolean equals(GetOpenWriteIdsRequest that) { + if (that == null) + return false; + + boolean this_present_currentTxnId = true; + boolean that_present_currentTxnId = true; + if (this_present_currentTxnId || that_present_currentTxnId) { + if (!(this_present_currentTxnId && that_present_currentTxnId)) + return false; + if (this.currentTxnId != that.currentTxnId) + return false; + } + + boolean this_present_tableNames = true && this.isSetTableNames(); + boolean that_present_tableNames = true && that.isSetTableNames(); + if (this_present_tableNames || that_present_tableNames) { + if (!(this_present_tableNames && that_present_tableNames)) + return false; + if (!this.tableNames.equals(that.tableNames)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_currentTxnId = true; + list.add(present_currentTxnId); + if (present_currentTxnId) + list.add(currentTxnId); + + boolean present_tableNames = true && (isSetTableNames()); + list.add(present_tableNames); + if (present_tableNames) + list.add(tableNames); + + return list.hashCode(); + } + + @Override + public int compareTo(GetOpenWriteIdsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCurrentTxnId()).compareTo(other.isSetCurrentTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCurrentTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.currentTxnId, other.currentTxnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetTableNames()).compareTo(other.isSetTableNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNames, other.tableNames); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetOpenWriteIdsRequest("); + boolean first = true; + + sb.append("currentTxnId:"); + sb.append(this.currentTxnId); + first = false; + if (!first) sb.append(", "); + sb.append("tableNames:"); + if (this.tableNames == null) { + sb.append("null"); + } else { + sb.append(this.tableNames); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetCurrentTxnId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'currentTxnId' is unset! Struct:" + toString()); + } + + if (!isSetTableNames()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableNames' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetOpenWriteIdsRequestStandardSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsRequestStandardScheme getScheme() { + return new GetOpenWriteIdsRequestStandardScheme(); + } + } + + private static class GetOpenWriteIdsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CURRENT_TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.currentTxnId = iprot.readI64(); + struct.setCurrentTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); + struct.tableNames = new ArrayList(_list566.size); + String _elem567; + for (int _i568 = 0; _i568 < _list566.size; ++_i568) + { + _elem567 = iprot.readString(); + struct.tableNames.add(_elem567); + } + iprot.readListEnd(); + } + struct.setTableNamesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(CURRENT_TXN_ID_FIELD_DESC); + oprot.writeI64(struct.currentTxnId); + oprot.writeFieldEnd(); + if (struct.tableNames != null) { + oprot.writeFieldBegin(TABLE_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tableNames.size())); + for (String _iter569 : struct.tableNames) + { + oprot.writeString(_iter569); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetOpenWriteIdsRequestTupleSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsRequestTupleScheme getScheme() { + return new GetOpenWriteIdsRequestTupleScheme(); + } + } + + private static class GetOpenWriteIdsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.currentTxnId); + { + oprot.writeI32(struct.tableNames.size()); + for (String _iter570 : struct.tableNames) + { + oprot.writeString(_iter570); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.currentTxnId = iprot.readI64(); + struct.setCurrentTxnIdIsSet(true); + { + 
org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tableNames = new ArrayList(_list571.size); + String _elem572; + for (int _i573 = 0; _i573 < _list571.size; ++_i573) + { + _elem572 = iprot.readString(); + struct.tableNames.add(_elem572); + } + } + struct.setTableNamesIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java new file mode 100644 index 0000000..613a563 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenWriteIdsResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetOpenWriteIdsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenWriteIdsResponse"); + + private static final org.apache.thrift.protocol.TField OPEN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("openWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetOpenWriteIdsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetOpenWriteIdsResponseTupleSchemeFactory()); + } + + private List openWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + OPEN_WRITE_IDS((short)1, "openWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
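Note on the generated GetOpenWriteIdsRequest just completed above: it is a plain Thrift bean with two required fields, the caller's currentTxnId and a list of fully-qualified table names whose write-id state is being asked for. The sketch below is illustrative only, not code from the patch; the txn id, table name, buffer size, and class name are invented for the example. It relies only on the constructor, validate(), write() and read() methods visible in the generated class plus the stock libthrift TMemoryBuffer and TBinaryProtocol helpers to round-trip an instance.

```java
import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsRequest;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class GetOpenWriteIdsRequestSketch {
  public static void main(String[] args) throws Exception {
    // Two required fields: the caller's transaction id and the tables it touches.
    GetOpenWriteIdsRequest req =
        new GetOpenWriteIdsRequest(42L, Arrays.asList("default.acid_tbl"));
    req.validate(); // throws TProtocolException if a required field is unset

    // Round-trip through the binary protocol, exercising the standard scheme.
    TMemoryBuffer buf = new TMemoryBuffer(1024);
    req.write(new TBinaryProtocol(buf));

    GetOpenWriteIdsRequest copy = new GetOpenWriteIdsRequest();
    copy.read(new TBinaryProtocol(buf));
    // Prints something like: GetOpenWriteIdsRequest(currentTxnId:42, tableNames:[default.acid_tbl])
    System.out.println(copy);
  }
}
```

In the real client path the struct would be handed to the metastore Thrift client rather than serialized by hand; the manual round-trip only shows that write()/read() dispatch to the standard scheme for a binary protocol.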
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // OPEN_WRITE_IDS + return OPEN_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.OPEN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("openWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenWriteIds.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenWriteIdsResponse.class, metaDataMap); + } + + public GetOpenWriteIdsResponse() { + } + + public GetOpenWriteIdsResponse( + List openWriteIds) + { + this(); + this.openWriteIds = openWriteIds; + } + + /** + * Performs a deep copy on other. + */ + public GetOpenWriteIdsResponse(GetOpenWriteIdsResponse other) { + if (other.isSetOpenWriteIds()) { + List __this__openWriteIds = new ArrayList(other.openWriteIds.size()); + for (OpenWriteIds other_element : other.openWriteIds) { + __this__openWriteIds.add(new OpenWriteIds(other_element)); + } + this.openWriteIds = __this__openWriteIds; + } + } + + public GetOpenWriteIdsResponse deepCopy() { + return new GetOpenWriteIdsResponse(this); + } + + @Override + public void clear() { + this.openWriteIds = null; + } + + public int getOpenWriteIdsSize() { + return (this.openWriteIds == null) ? 0 : this.openWriteIds.size(); + } + + public java.util.Iterator getOpenWriteIdsIterator() { + return (this.openWriteIds == null) ? 
null : this.openWriteIds.iterator(); + } + + public void addToOpenWriteIds(OpenWriteIds elem) { + if (this.openWriteIds == null) { + this.openWriteIds = new ArrayList(); + } + this.openWriteIds.add(elem); + } + + public List getOpenWriteIds() { + return this.openWriteIds; + } + + public void setOpenWriteIds(List openWriteIds) { + this.openWriteIds = openWriteIds; + } + + public void unsetOpenWriteIds() { + this.openWriteIds = null; + } + + /** Returns true if field openWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetOpenWriteIds() { + return this.openWriteIds != null; + } + + public void setOpenWriteIdsIsSet(boolean value) { + if (!value) { + this.openWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case OPEN_WRITE_IDS: + if (value == null) { + unsetOpenWriteIds(); + } else { + setOpenWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case OPEN_WRITE_IDS: + return getOpenWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case OPEN_WRITE_IDS: + return isSetOpenWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetOpenWriteIdsResponse) + return this.equals((GetOpenWriteIdsResponse)that); + return false; + } + + public boolean equals(GetOpenWriteIdsResponse that) { + if (that == null) + return false; + + boolean this_present_openWriteIds = true && this.isSetOpenWriteIds(); + boolean that_present_openWriteIds = true && that.isSetOpenWriteIds(); + if (this_present_openWriteIds || that_present_openWriteIds) { + if (!(this_present_openWriteIds && that_present_openWriteIds)) + return false; + if (!this.openWriteIds.equals(that.openWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_openWriteIds = true && (isSetOpenWriteIds()); + list.add(present_openWriteIds); + if (present_openWriteIds) + list.add(openWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetOpenWriteIdsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetOpenWriteIds()).compareTo(other.isSetOpenWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOpenWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.openWriteIds, other.openWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetOpenWriteIdsResponse("); + boolean first = true; + + 
sb.append("openWriteIds:"); + if (this.openWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.openWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetOpenWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'openWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetOpenWriteIdsResponseStandardSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsResponseStandardScheme getScheme() { + return new GetOpenWriteIdsResponseStandardScheme(); + } + } + + private static class GetOpenWriteIdsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // OPEN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list582 = iprot.readListBegin(); + struct.openWriteIds = new ArrayList(_list582.size); + OpenWriteIds _elem583; + for (int _i584 = 0; _i584 < _list582.size; ++_i584) + { + _elem583 = new OpenWriteIds(); + _elem583.read(iprot); + struct.openWriteIds.add(_elem583); + } + iprot.readListEnd(); + } + struct.setOpenWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.openWriteIds != null) { + oprot.writeFieldBegin(OPEN_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.openWriteIds.size())); + for (OpenWriteIds _iter585 : struct.openWriteIds) + { + _iter585.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetOpenWriteIdsResponseTupleSchemeFactory implements SchemeFactory { + public GetOpenWriteIdsResponseTupleScheme getScheme() { + return new GetOpenWriteIdsResponseTupleScheme(); + } + } + + private static class GetOpenWriteIdsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsResponse 
struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.openWriteIds.size()); + for (OpenWriteIds _iter586 : struct.openWriteIds) + { + _iter586.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.openWriteIds = new ArrayList(_list587.size); + OpenWriteIds _elem588; + for (int _i589 = 0; _i589 < _list587.size; ++_i589) + { + _elem588 = new OpenWriteIds(); + _elem588.read(iprot); + struct.openWriteIds.add(_elem588); + } + } + struct.setOpenWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 575737d..f80369e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -525,13 +525,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list740.size); - String _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list780.size); + String _elem781; + for (int _i782 = 0; _i782 < _list780.size; ++_i782) { - _elem741 = iprot.readString(); - struct.tblNames.add(_elem741); + _elem781 = iprot.readString(); + struct.tblNames.add(_elem781); } iprot.readListEnd(); } @@ -572,9 +572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter743 : struct.tblNames) + for (String _iter783 : struct.tblNames) { - oprot.writeString(_iter743); + oprot.writeString(_iter783); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter744 : struct.tblNames) + for (String _iter784 : struct.tblNames) { - oprot.writeString(_iter744); + oprot.writeString(_iter784); } } } @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list745.size); - String _elem746; - for (int _i747 = 0; _i747 < _list745.size; ++_i747) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list785.size); + String _elem786; + for (int _i787 = 0; _i787 < _list785.size; 
++_i787) { - _elem746 = iprot.readString(); - struct.tblNames.add(_elem746); + _elem786 = iprot.readString(); + struct.tblNames.add(_elem786); } } struct.setTblNamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index 050d093..1719827 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); - struct.tables = new ArrayList
<Table>(_list748.size); - Table _elem749; - for (int _i750 = 0; _i750 < _list748.size; ++_i750) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.tables = new ArrayList<Table>
(_list788.size); + Table _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem749 = new Table(); - _elem749.read(iprot); - struct.tables.add(_elem749); + _elem789 = new Table(); + _elem789.read(iprot); + struct.tables.add(_elem789); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter751 : struct.tables) + for (Table _iter791 : struct.tables) { - _iter751.write(oprot); + _iter791.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter752 : struct.tables) + for (Table _iter792 : struct.tables) { - _iter752.write(oprot); + _iter792.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
<Table>(_list753.size); - Table _elem754; - for (int _i755 = 0; _i755 < _list753.size; ++_i755) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList<Table>
(_list793.size); + Table _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem754 = new Table(); - _elem754.read(iprot); - struct.tables.add(_elem754); + _elem794 = new Table(); + _elem794.read(iprot); + struct.tables.add(_elem794); } } struct.setTablesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 828e94e..9a3cfef 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set582 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set582.size); - long _elem583; - for (int _i584 = 0; _i584 < _set582.size; ++_i584) + org.apache.thrift.protocol.TSet _set622 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set622.size); + long _elem623; + for (int _i624 = 0; _i624 < _set622.size; ++_i624) { - _elem583 = iprot.readI64(); - struct.aborted.add(_elem583); + _elem623 = iprot.readI64(); + struct.aborted.add(_elem623); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set585 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set585.size); - long _elem586; - for (int _i587 = 0; _i587 < _set585.size; ++_i587) + org.apache.thrift.protocol.TSet _set625 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set625.size); + long _elem626; + for (int _i627 = 0; _i627 < _set625.size; ++_i627) { - _elem586 = iprot.readI64(); - struct.nosuch.add(_elem586); + _elem626 = iprot.readI64(); + struct.nosuch.add(_elem626); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter588 : struct.aborted) + for (long _iter628 : struct.aborted) { - oprot.writeI64(_iter588); + oprot.writeI64(_iter628); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter589 : struct.nosuch) + for (long _iter629 : struct.nosuch) { - oprot.writeI64(_iter589); + oprot.writeI64(_iter629); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter590 : struct.aborted) + for (long _iter630 : struct.aborted) { - oprot.writeI64(_iter590); + oprot.writeI64(_iter630); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter591 : struct.nosuch) + for (long _iter631 : struct.nosuch) { - oprot.writeI64(_iter591); + 
oprot.writeI64(_iter631); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set592 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set592.size); - long _elem593; - for (int _i594 = 0; _i594 < _set592.size; ++_i594) + org.apache.thrift.protocol.TSet _set632 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set632.size); + long _elem633; + for (int _i634 = 0; _i634 < _set632.size; ++_i634) { - _elem593 = iprot.readI64(); - struct.aborted.add(_elem593); + _elem633 = iprot.readI64(); + struct.aborted.add(_elem633); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set595 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set595.size); - long _elem596; - for (int _i597 = 0; _i597 < _set595.size; ++_i597) + org.apache.thrift.protocol.TSet _set635 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set635.size); + long _elem636; + for (int _i637 = 0; _i637 < _set635.size; ++_i637) { - _elem596 = iprot.readI64(); - struct.nosuch.add(_elem596); + _elem636 = iprot.readI64(); + struct.nosuch.add(_elem636); } } struct.setNosuchIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 184f9d5..02e519d 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -538,13 +538,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list640 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list640.size); - String _elem641; - for (int _i642 = 0; _i642 < _list640.size; ++_i642) + org.apache.thrift.protocol.TList _list680 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list680.size); + String _elem681; + for (int _i682 = 0; _i682 < _list680.size; ++_i682) { - _elem641 = iprot.readString(); - struct.filesAdded.add(_elem641); + _elem681 = iprot.readString(); + struct.filesAdded.add(_elem681); } iprot.readListEnd(); } @@ -556,13 +556,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list643 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list643.size); - String _elem644; - for (int _i645 = 0; _i645 < _list643.size; ++_i645) + org.apache.thrift.protocol.TList _list683 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list683.size); + String _elem684; + for (int _i685 = 0; _i685 < _list683.size; ++_i685) { - _elem644 = iprot.readString(); - 
struct.filesAddedChecksum.add(_elem644); + _elem684 = iprot.readString(); + struct.filesAddedChecksum.add(_elem684); } iprot.readListEnd(); } @@ -593,9 +593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter646 : struct.filesAdded) + for (String _iter686 : struct.filesAdded) { - oprot.writeString(_iter646); + oprot.writeString(_iter686); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter647 : struct.filesAddedChecksum) + for (String _iter687 : struct.filesAddedChecksum) { - oprot.writeString(_iter647); + oprot.writeString(_iter687); } oprot.writeListEnd(); } @@ -634,9 +634,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter648 : struct.filesAdded) + for (String _iter688 : struct.filesAdded) { - oprot.writeString(_iter648); + oprot.writeString(_iter688); } } BitSet optionals = new BitSet(); @@ -653,9 +653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter649 : struct.filesAddedChecksum) + for (String _iter689 : struct.filesAddedChecksum) { - oprot.writeString(_iter649); + oprot.writeString(_iter689); } } } @@ -665,13 +665,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list650 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list650.size); - String _elem651; - for (int _i652 = 0; _i652 < _list650.size; ++_i652) + org.apache.thrift.protocol.TList _list690 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list690.size); + String _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) { - _elem651 = iprot.readString(); - struct.filesAdded.add(_elem651); + _elem691 = iprot.readString(); + struct.filesAdded.add(_elem691); } } struct.setFilesAddedIsSet(true); @@ -682,13 +682,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list653 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list653.size); - String _elem654; - for (int _i655 = 0; _i655 < _list653.size; ++_i655) + org.apache.thrift.protocol.TList _list693 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list693.size); + String _elem694; + for (int _i695 = 0; _i695 < _list693.size; ++_i695) { - _elem654 = iprot.readString(); - 
struct.filesAddedChecksum.add(_elem654); + _elem694 = iprot.readString(); + struct.filesAddedChecksum.add(_elem694); } } struct.setFilesAddedChecksumIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index b5d17cc..579fb93 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); - struct.component = new ArrayList(_list566.size); - LockComponent _elem567; - for (int _i568 = 0; _i568 < _list566.size; ++_i568) + org.apache.thrift.protocol.TList _list606 = iprot.readListBegin(); + struct.component = new ArrayList(_list606.size); + LockComponent _elem607; + for (int _i608 = 0; _i608 < _list606.size; ++_i608) { - _elem567 = new LockComponent(); - _elem567.read(iprot); - struct.component.add(_elem567); + _elem607 = new LockComponent(); + _elem607.read(iprot); + struct.component.add(_elem607); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter569 : struct.component) + for (LockComponent _iter609 : struct.component) { - _iter569.write(oprot); + _iter609.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter570 : struct.component) + for (LockComponent _iter610 : struct.component) { - _iter570.write(oprot); + _iter610.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list571.size); - LockComponent _elem572; - for (int _i573 = 0; _i573 < _list571.size; ++_i573) + org.apache.thrift.protocol.TList _list611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list611.size); + LockComponent _elem612; + for (int _i613 = 0; _i613 < _list611.size; ++_i613) { - _elem572 = new LockComponent(); - _elem572.read(iprot); - struct.component.add(_elem572); + _elem612 = new LockComponent(); + _elem612.read(iprot); + struct.component.add(_elem612); } } struct.setComponentIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java index f217bf0..082ec31 
100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java @@ -533,13 +533,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization str case 2: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set756 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set756.size); - String _elem757; - for (int _i758 = 0; _i758 < _set756.size; ++_i758) + org.apache.thrift.protocol.TSet _set796 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set796.size); + String _elem797; + for (int _i798 = 0; _i798 < _set796.size; ++_i798) { - _elem757 = iprot.readString(); - struct.tablesUsed.add(_elem757); + _elem797 = iprot.readString(); + struct.tablesUsed.add(_elem797); } iprot.readSetEnd(); } @@ -578,9 +578,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter759 : struct.tablesUsed) + for (String _iter799 : struct.tablesUsed) { - oprot.writeString(_iter759); + oprot.writeString(_iter799); } oprot.writeSetEnd(); } @@ -609,9 +609,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str struct.materializationTable.write(oprot); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter760 : struct.tablesUsed) + for (String _iter800 : struct.tablesUsed) { - oprot.writeString(_iter760); + oprot.writeString(_iter800); } } oprot.writeI64(struct.invalidationTime); @@ -624,13 +624,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Materialization stru struct.materializationTable.read(iprot); struct.setMaterializationTableIsSet(true); { - org.apache.thrift.protocol.TSet _set761 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set761.size); - String _elem762; - for (int _i763 = 0; _i763 < _set761.size; ++_i763) + org.apache.thrift.protocol.TSet _set801 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set801.size); + String _elem802; + for (int _i803 = 0; _i803 < _set801.size; ++_i803) { - _elem762 = iprot.readString(); - struct.tablesUsed.add(_elem762); + _elem802 = iprot.readString(); + struct.tablesUsed.add(_elem802); } } struct.setTablesUsedIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index eb57844..a5e5a6b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list632 = iprot.readListBegin(); - struct.events = new ArrayList(_list632.size); - NotificationEvent _elem633; - for 
(int _i634 = 0; _i634 < _list632.size; ++_i634) + org.apache.thrift.protocol.TList _list672 = iprot.readListBegin(); + struct.events = new ArrayList(_list672.size); + NotificationEvent _elem673; + for (int _i674 = 0; _i674 < _list672.size; ++_i674) { - _elem633 = new NotificationEvent(); - _elem633.read(iprot); - struct.events.add(_elem633); + _elem673 = new NotificationEvent(); + _elem673.read(iprot); + struct.events.add(_elem673); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter635 : struct.events) + for (NotificationEvent _iter675 : struct.events) { - _iter635.write(oprot); + _iter675.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter636 : struct.events) + for (NotificationEvent _iter676 : struct.events) { - _iter636.write(oprot); + _iter676.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list637 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list637.size); - NotificationEvent _elem638; - for (int _i639 = 0; _i639 < _list637.size; ++_i639) + org.apache.thrift.protocol.TList _list677 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list677.size); + NotificationEvent _elem678; + for (int _i679 = 0; _i679 < _list677.size; ++_i679) { - _elem638 = new NotificationEvent(); - _elem638.read(iprot); - struct.events.add(_elem638); + _elem678 = new NotificationEvent(); + _elem678.read(iprot); + struct.events.add(_elem678); } } struct.setEventsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java new file mode 100644 index 0000000..2665711 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenWriteIds.java @@ -0,0 +1,851 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; 
+import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class OpenWriteIds implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenWriteIds"); + + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("writeIdHighWaterMark", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField OPEN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("openWriteIds", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField MIN_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("minWriteId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField ABORTED_BITS_FIELD_DESC = new org.apache.thrift.protocol.TField("abortedBits", org.apache.thrift.protocol.TType.STRING, (short)5); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new OpenWriteIdsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new OpenWriteIdsTupleSchemeFactory()); + } + + private String tableName; // required + private long writeIdHighWaterMark; // required + private List openWriteIds; // required + private long minWriteId; // optional + private ByteBuffer abortedBits; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_NAME((short)1, "tableName"), + WRITE_ID_HIGH_WATER_MARK((short)2, "writeIdHighWaterMark"), + OPEN_WRITE_IDS((short)3, "openWriteIds"), + MIN_WRITE_ID((short)4, "minWriteId"), + ABORTED_BITS((short)5, "abortedBits"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_NAME + return TABLE_NAME; + case 2: // WRITE_ID_HIGH_WATER_MARK + return WRITE_ID_HIGH_WATER_MARK; + case 3: // OPEN_WRITE_IDS + return OPEN_WRITE_IDS; + case 4: // MIN_WRITE_ID + return MIN_WRITE_ID; + case 5: // ABORTED_BITS + return ABORTED_BITS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEIDHIGHWATERMARK_ISSET_ID = 0; + private static final int __MINWRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.MIN_WRITE_ID}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID_HIGH_WATER_MARK, new org.apache.thrift.meta_data.FieldMetaData("writeIdHighWaterMark", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.OPEN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("openWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.MIN_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("minWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ABORTED_BITS, new org.apache.thrift.meta_data.FieldMetaData("abortedBits", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenWriteIds.class, metaDataMap); + } + + public OpenWriteIds() { + } + + public OpenWriteIds( + String tableName, + long writeIdHighWaterMark, + List openWriteIds, + ByteBuffer abortedBits) + { + this(); + this.tableName = tableName; + this.writeIdHighWaterMark = writeIdHighWaterMark; + setWriteIdHighWaterMarkIsSet(true); + this.openWriteIds = openWriteIds; + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + /** + * Performs a deep copy on other. 
+ */ + public OpenWriteIds(OpenWriteIds other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + this.writeIdHighWaterMark = other.writeIdHighWaterMark; + if (other.isSetOpenWriteIds()) { + List __this__openWriteIds = new ArrayList(other.openWriteIds); + this.openWriteIds = __this__openWriteIds; + } + this.minWriteId = other.minWriteId; + if (other.isSetAbortedBits()) { + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(other.abortedBits); + } + } + + public OpenWriteIds deepCopy() { + return new OpenWriteIds(this); + } + + @Override + public void clear() { + this.tableName = null; + setWriteIdHighWaterMarkIsSet(false); + this.writeIdHighWaterMark = 0; + this.openWriteIds = null; + setMinWriteIdIsSet(false); + this.minWriteId = 0; + this.abortedBits = null; + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public long getWriteIdHighWaterMark() { + return this.writeIdHighWaterMark; + } + + public void setWriteIdHighWaterMark(long writeIdHighWaterMark) { + this.writeIdHighWaterMark = writeIdHighWaterMark; + setWriteIdHighWaterMarkIsSet(true); + } + + public void unsetWriteIdHighWaterMark() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID); + } + + /** Returns true if field writeIdHighWaterMark is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteIdHighWaterMark() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID); + } + + public void setWriteIdHighWaterMarkIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEIDHIGHWATERMARK_ISSET_ID, value); + } + + public int getOpenWriteIdsSize() { + return (this.openWriteIds == null) ? 0 : this.openWriteIds.size(); + } + + public java.util.Iterator getOpenWriteIdsIterator() { + return (this.openWriteIds == null) ? 
null : this.openWriteIds.iterator(); + } + + public void addToOpenWriteIds(long elem) { + if (this.openWriteIds == null) { + this.openWriteIds = new ArrayList(); + } + this.openWriteIds.add(elem); + } + + public List getOpenWriteIds() { + return this.openWriteIds; + } + + public void setOpenWriteIds(List openWriteIds) { + this.openWriteIds = openWriteIds; + } + + public void unsetOpenWriteIds() { + this.openWriteIds = null; + } + + /** Returns true if field openWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetOpenWriteIds() { + return this.openWriteIds != null; + } + + public void setOpenWriteIdsIsSet(boolean value) { + if (!value) { + this.openWriteIds = null; + } + } + + public long getMinWriteId() { + return this.minWriteId; + } + + public void setMinWriteId(long minWriteId) { + this.minWriteId = minWriteId; + setMinWriteIdIsSet(true); + } + + public void unsetMinWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MINWRITEID_ISSET_ID); + } + + /** Returns true if field minWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMinWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MINWRITEID_ISSET_ID); + } + + public void setMinWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MINWRITEID_ISSET_ID, value); + } + + public byte[] getAbortedBits() { + setAbortedBits(org.apache.thrift.TBaseHelper.rightSize(abortedBits)); + return abortedBits == null ? null : abortedBits.array(); + } + + public ByteBuffer bufferForAbortedBits() { + return org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + public void setAbortedBits(byte[] abortedBits) { + this.abortedBits = abortedBits == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(abortedBits, abortedBits.length)); + } + + public void setAbortedBits(ByteBuffer abortedBits) { + this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits); + } + + public void unsetAbortedBits() { + this.abortedBits = null; + } + + /** Returns true if field abortedBits is set (has been assigned a value) and false otherwise */ + public boolean isSetAbortedBits() { + return this.abortedBits != null; + } + + public void setAbortedBitsIsSet(boolean value) { + if (!value) { + this.abortedBits = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + case WRITE_ID_HIGH_WATER_MARK: + if (value == null) { + unsetWriteIdHighWaterMark(); + } else { + setWriteIdHighWaterMark((Long)value); + } + break; + + case OPEN_WRITE_IDS: + if (value == null) { + unsetOpenWriteIds(); + } else { + setOpenWriteIds((List)value); + } + break; + + case MIN_WRITE_ID: + if (value == null) { + unsetMinWriteId(); + } else { + setMinWriteId((Long)value); + } + break; + + case ABORTED_BITS: + if (value == null) { + unsetAbortedBits(); + } else { + setAbortedBits((ByteBuffer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_NAME: + return getTableName(); + + case WRITE_ID_HIGH_WATER_MARK: + return getWriteIdHighWaterMark(); + + case OPEN_WRITE_IDS: + return getOpenWriteIds(); + + case MIN_WRITE_ID: + return getMinWriteId(); + + case ABORTED_BITS: + return getAbortedBits(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and 
false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_NAME: + return isSetTableName(); + case WRITE_ID_HIGH_WATER_MARK: + return isSetWriteIdHighWaterMark(); + case OPEN_WRITE_IDS: + return isSetOpenWriteIds(); + case MIN_WRITE_ID: + return isSetMinWriteId(); + case ABORTED_BITS: + return isSetAbortedBits(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof OpenWriteIds) + return this.equals((OpenWriteIds)that); + return false; + } + + public boolean equals(OpenWriteIds that) { + if (that == null) + return false; + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_writeIdHighWaterMark = true; + boolean that_present_writeIdHighWaterMark = true; + if (this_present_writeIdHighWaterMark || that_present_writeIdHighWaterMark) { + if (!(this_present_writeIdHighWaterMark && that_present_writeIdHighWaterMark)) + return false; + if (this.writeIdHighWaterMark != that.writeIdHighWaterMark) + return false; + } + + boolean this_present_openWriteIds = true && this.isSetOpenWriteIds(); + boolean that_present_openWriteIds = true && that.isSetOpenWriteIds(); + if (this_present_openWriteIds || that_present_openWriteIds) { + if (!(this_present_openWriteIds && that_present_openWriteIds)) + return false; + if (!this.openWriteIds.equals(that.openWriteIds)) + return false; + } + + boolean this_present_minWriteId = true && this.isSetMinWriteId(); + boolean that_present_minWriteId = true && that.isSetMinWriteId(); + if (this_present_minWriteId || that_present_minWriteId) { + if (!(this_present_minWriteId && that_present_minWriteId)) + return false; + if (this.minWriteId != that.minWriteId) + return false; + } + + boolean this_present_abortedBits = true && this.isSetAbortedBits(); + boolean that_present_abortedBits = true && that.isSetAbortedBits(); + if (this_present_abortedBits || that_present_abortedBits) { + if (!(this_present_abortedBits && that_present_abortedBits)) + return false; + if (!this.abortedBits.equals(that.abortedBits)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_writeIdHighWaterMark = true; + list.add(present_writeIdHighWaterMark); + if (present_writeIdHighWaterMark) + list.add(writeIdHighWaterMark); + + boolean present_openWriteIds = true && (isSetOpenWriteIds()); + list.add(present_openWriteIds); + if (present_openWriteIds) + list.add(openWriteIds); + + boolean present_minWriteId = true && (isSetMinWriteId()); + list.add(present_minWriteId); + if (present_minWriteId) + list.add(minWriteId); + + boolean present_abortedBits = true && (isSetAbortedBits()); + list.add(present_abortedBits); + if (present_abortedBits) + list.add(abortedBits); + + return list.hashCode(); + } + + @Override + public int compareTo(OpenWriteIds other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison 
= 0; + + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteIdHighWaterMark()).compareTo(other.isSetWriteIdHighWaterMark()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteIdHighWaterMark()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeIdHighWaterMark, other.writeIdHighWaterMark); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOpenWriteIds()).compareTo(other.isSetOpenWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOpenWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.openWriteIds, other.openWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMinWriteId()).compareTo(other.isSetMinWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMinWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.minWriteId, other.minWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAbortedBits()).compareTo(other.isSetAbortedBits()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAbortedBits()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.abortedBits, other.abortedBits); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("OpenWriteIds("); + boolean first = true; + + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeIdHighWaterMark:"); + sb.append(this.writeIdHighWaterMark); + first = false; + if (!first) sb.append(", "); + sb.append("openWriteIds:"); + if (this.openWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.openWriteIds); + } + first = false; + if (isSetMinWriteId()) { + if (!first) sb.append(", "); + sb.append("minWriteId:"); + sb.append(this.minWriteId); + first = false; + } + if (!first) sb.append(", "); + sb.append("abortedBits:"); + if (this.abortedBits == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.abortedBits, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString()); + } + + if (!isSetWriteIdHighWaterMark()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeIdHighWaterMark' is unset! 
Struct:" + toString()); + } + + if (!isSetOpenWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'openWriteIds' is unset! Struct:" + toString()); + } + + if (!isSetAbortedBits()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'abortedBits' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class OpenWriteIdsStandardSchemeFactory implements SchemeFactory { + public OpenWriteIdsStandardScheme getScheme() { + return new OpenWriteIdsStandardScheme(); + } + } + + private static class OpenWriteIdsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, OpenWriteIds struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WRITE_ID_HIGH_WATER_MARK + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeIdHighWaterMark = iprot.readI64(); + struct.setWriteIdHighWaterMarkIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // OPEN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list574 = iprot.readListBegin(); + struct.openWriteIds = new ArrayList(_list574.size); + long _elem575; + for (int _i576 = 0; _i576 < _list574.size; ++_i576) + { + _elem575 = iprot.readI64(); + struct.openWriteIds.add(_elem575); + } + iprot.readListEnd(); + } + struct.setOpenWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MIN_WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.minWriteId = iprot.readI64(); + struct.setMinWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // ABORTED_BITS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.abortedBits = iprot.readBinary(); + struct.setAbortedBitsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + 
iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, OpenWriteIds struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_HIGH_WATER_MARK_FIELD_DESC); + oprot.writeI64(struct.writeIdHighWaterMark); + oprot.writeFieldEnd(); + if (struct.openWriteIds != null) { + oprot.writeFieldBegin(OPEN_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.openWriteIds.size())); + for (long _iter577 : struct.openWriteIds) + { + oprot.writeI64(_iter577); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.isSetMinWriteId()) { + oprot.writeFieldBegin(MIN_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.minWriteId); + oprot.writeFieldEnd(); + } + if (struct.abortedBits != null) { + oprot.writeFieldBegin(ABORTED_BITS_FIELD_DESC); + oprot.writeBinary(struct.abortedBits); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class OpenWriteIdsTupleSchemeFactory implements SchemeFactory { + public OpenWriteIdsTupleScheme getScheme() { + return new OpenWriteIdsTupleScheme(); + } + } + + private static class OpenWriteIdsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, OpenWriteIds struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.tableName); + oprot.writeI64(struct.writeIdHighWaterMark); + { + oprot.writeI32(struct.openWriteIds.size()); + for (long _iter578 : struct.openWriteIds) + { + oprot.writeI64(_iter578); + } + } + oprot.writeBinary(struct.abortedBits); + BitSet optionals = new BitSet(); + if (struct.isSetMinWriteId()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetMinWriteId()) { + oprot.writeI64(struct.minWriteId); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, OpenWriteIds struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + struct.writeIdHighWaterMark = iprot.readI64(); + struct.setWriteIdHighWaterMarkIsSet(true); + { + org.apache.thrift.protocol.TList _list579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.openWriteIds = new ArrayList(_list579.size); + long _elem580; + for (int _i581 = 0; _i581 < _list579.size; ++_i581) + { + _elem580 = iprot.readI64(); + struct.openWriteIds.add(_elem580); + } + } + struct.setOpenWriteIdsIsSet(true); + struct.abortedBits = iprot.readBinary(); + struct.setAbortedBitsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.minWriteId = iprot.readI64(); + struct.setMinWriteIdIsSet(true); + } + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 5896fd9..2461db6 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list700.size); - long _elem701; - for (int _i702 = 0; _i702 < _list700.size; ++_i702) + org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list740.size); + long _elem741; + for (int _i742 = 0; _i742 < _list740.size; ++_i742) { - _elem701 = iprot.readI64(); - struct.fileIds.add(_elem701); + _elem741 = iprot.readI64(); + struct.fileIds.add(_elem741); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list703 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list703.size); - ByteBuffer _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list743 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list743.size); + ByteBuffer _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem704 = iprot.readBinary(); - struct.metadata.add(_elem704); + _elem744 = iprot.readBinary(); + struct.metadata.add(_elem744); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter706 : struct.fileIds) + for (long _iter746 : struct.fileIds) { - oprot.writeI64(_iter706); + oprot.writeI64(_iter746); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter707 : struct.metadata) + for (ByteBuffer _iter747 : struct.metadata) { - oprot.writeBinary(_iter707); + oprot.writeBinary(_iter747); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter708 : struct.fileIds) + for (long _iter748 : struct.fileIds) { - oprot.writeI64(_iter708); + oprot.writeI64(_iter748); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter709 : struct.metadata) + for (ByteBuffer _iter749 : struct.metadata) { - oprot.writeBinary(_iter709); + oprot.writeBinary(_iter749); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list710 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list710.size); - long _elem711; - for (int _i712 = 0; _i712 < _list710.size; 
++_i712) + org.apache.thrift.protocol.TList _list750 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list750.size); + long _elem751; + for (int _i752 = 0; _i752 < _list750.size; ++_i752) { - _elem711 = iprot.readI64(); - struct.fileIds.add(_elem711); + _elem751 = iprot.readI64(); + struct.fileIds.add(_elem751); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list713.size); - ByteBuffer _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list753.size); + ByteBuffer _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem714 = iprot.readBinary(); - struct.metadata.add(_elem714); + _elem754 = iprot.readBinary(); + struct.metadata.add(_elem754); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index 1b50602..2bd465d 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list608 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list608.size); - ShowCompactResponseElement _elem609; - for (int _i610 = 0; _i610 < _list608.size; ++_i610) + org.apache.thrift.protocol.TList _list648 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list648.size); + ShowCompactResponseElement _elem649; + for (int _i650 = 0; _i650 < _list648.size; ++_i650) { - _elem609 = new ShowCompactResponseElement(); - _elem609.read(iprot); - struct.compacts.add(_elem609); + _elem649 = new ShowCompactResponseElement(); + _elem649.read(iprot); + struct.compacts.add(_elem649); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter611 : struct.compacts) + for (ShowCompactResponseElement _iter651 : struct.compacts) { - _iter611.write(oprot); + _iter651.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter612 : struct.compacts) + for (ShowCompactResponseElement _iter652 : struct.compacts) { - _iter612.write(oprot); + _iter652.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list613 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list613.size); - ShowCompactResponseElement _elem614; - for (int _i615 = 0; _i615 < _list613.size; ++_i615) + org.apache.thrift.protocol.TList _list653 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list653.size); + ShowCompactResponseElement _elem654; + for (int _i655 = 0; _i655 < _list653.size; ++_i655) { - _elem614 = new ShowCompactResponseElement(); - _elem614.read(iprot); - struct.compacts.add(_elem614); + _elem654 = new ShowCompactResponseElement(); + _elem654.read(iprot); + struct.compacts.add(_elem654); } } struct.setCompactsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index a21a191..695df22 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list574 = iprot.readListBegin(); - struct.locks = new ArrayList(_list574.size); - ShowLocksResponseElement _elem575; - for (int _i576 = 0; _i576 < _list574.size; ++_i576) + org.apache.thrift.protocol.TList _list614 = iprot.readListBegin(); + struct.locks = new ArrayList(_list614.size); + ShowLocksResponseElement _elem615; + for (int _i616 = 0; _i616 < _list614.size; ++_i616) { - _elem575 = new ShowLocksResponseElement(); - _elem575.read(iprot); - struct.locks.add(_elem575); + _elem615 = new ShowLocksResponseElement(); + _elem615.read(iprot); + struct.locks.add(_elem615); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter577 : struct.locks) + for (ShowLocksResponseElement _iter617 : struct.locks) { - _iter577.write(oprot); + _iter617.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter578 : struct.locks) + for (ShowLocksResponseElement _iter618 : struct.locks) { - _iter578.write(oprot); + _iter618.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list579.size); - ShowLocksResponseElement _elem580; - for (int _i581 = 0; _i581 < _list579.size; ++_i581) + org.apache.thrift.protocol.TList _list619 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list619.size); + ShowLocksResponseElement _elem620; + for (int _i621 = 0; _i621 < _list619.size; ++_i621) { - _elem580 = new ShowLocksResponseElement(); - _elem580.read(iprot); - struct.locks.add(_elem580); + _elem620 = new ShowLocksResponseElement(); + _elem620.read(iprot); + struct.locks.add(_elem620); } } struct.setLocksIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index aaca408..16e84b8 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -326,6 +326,12 @@ public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException; + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws NoSuchTxnException, MetaException, org.apache.thrift.TException; + + public void add_transactional_table(AddTransactionalTableRequest rqst) throws MetaException, org.apache.thrift.TException; + + public AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException; + public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException; public LockResponse check_lock(CheckLockRequest rqst) throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, org.apache.thrift.TException; @@ -696,6 +702,12 @@ public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_open_write_ids(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void add_transactional_table(AddTransactionalTableRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void allocate_table_write_id(AllocateTableWriteIdRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void check_lock(CheckLockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -4974,6 +4986,90 @@ public void recv_commit_txn() throws NoSuchTxnException, TxnAbortedException, or return; } + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws NoSuchTxnException, MetaException, org.apache.thrift.TException + { + send_get_open_write_ids(rqst); + return recv_get_open_write_ids(); + } + + public void send_get_open_write_ids(GetOpenWriteIdsRequest rqst) throws org.apache.thrift.TException + { + get_open_write_ids_args args = new get_open_write_ids_args(); + args.setRqst(rqst); + sendBase("get_open_write_ids", args); + } + + public GetOpenWriteIdsResponse recv_get_open_write_ids() throws NoSuchTxnException, MetaException, org.apache.thrift.TException + { + get_open_write_ids_result result = new 
get_open_write_ids_result(); + receiveBase(result, "get_open_write_ids"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_open_write_ids failed: unknown result"); + } + + public void add_transactional_table(AddTransactionalTableRequest rqst) throws MetaException, org.apache.thrift.TException + { + send_add_transactional_table(rqst); + recv_add_transactional_table(); + } + + public void send_add_transactional_table(AddTransactionalTableRequest rqst) throws org.apache.thrift.TException + { + add_transactional_table_args args = new add_transactional_table_args(); + args.setRqst(rqst); + sendBase("add_transactional_table", args); + } + + public void recv_add_transactional_table() throws MetaException, org.apache.thrift.TException + { + add_transactional_table_result result = new add_transactional_table_result(); + receiveBase(result, "add_transactional_table"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + + public AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException + { + send_allocate_table_write_id(rqst); + return recv_allocate_table_write_id(); + } + + public void send_allocate_table_write_id(AllocateTableWriteIdRequest rqst) throws org.apache.thrift.TException + { + allocate_table_write_id_args args = new allocate_table_write_id_args(); + args.setRqst(rqst); + sendBase("allocate_table_write_id", args); + } + + public AllocateTableWriteIdResponse recv_allocate_table_write_id() throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException + { + allocate_table_write_id_result result = new allocate_table_write_id_result(); + receiveBase(result, "allocate_table_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "allocate_table_write_id failed: unknown result"); + } + public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException { send_lock(rqst); @@ -11164,6 +11260,102 @@ public void getResult() throws NoSuchTxnException, TxnAbortedException, org.apac } } + public void get_open_write_ids(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_open_write_ids_call method_call = new get_open_write_ids_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetOpenWriteIdsRequest rqst; + public get_open_write_ids_call(GetOpenWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_open_write_ids", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_open_write_ids_args args = new get_open_write_ids_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetOpenWriteIdsResponse getResult() throws NoSuchTxnException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_open_write_ids(); + } + } + + public void add_transactional_table(AddTransactionalTableRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + add_transactional_table_call method_call = new add_transactional_table_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_transactional_table_call extends org.apache.thrift.async.TAsyncMethodCall { + private AddTransactionalTableRequest rqst; + public add_transactional_table_call(AddTransactionalTableRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_transactional_table", org.apache.thrift.protocol.TMessageType.CALL, 0)); + add_transactional_table_args args = new add_transactional_table_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_add_transactional_table(); + } + } + + public void allocate_table_write_id(AllocateTableWriteIdRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + allocate_table_write_id_call method_call = new allocate_table_write_id_call(rqst, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private AllocateTableWriteIdRequest rqst; + public allocate_table_write_id_call(AllocateTableWriteIdRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.rqst = rqst; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("allocate_table_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + allocate_table_write_id_args args = new allocate_table_write_id_args(); + args.setRqst(rqst); + args.write(prot); + prot.writeMessageEnd(); + } + + public AllocateTableWriteIdResponse getResult() throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_allocate_table_write_id(); + } + } + public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); lock_call method_call = new lock_call(rqst, resultHandler, this, ___protocolFactory, ___transport); @@ -12634,6 +12826,9 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_open_write_ids() { + super("get_open_write_ids"); + } + + public get_open_write_ids_args getEmptyArgsInstance() { + return new get_open_write_ids_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_open_write_ids_result getResult(I iface, get_open_write_ids_args args) throws org.apache.thrift.TException { + get_open_write_ids_result result = new get_open_write_ids_result(); + try { + result.success = iface.get_open_write_ids(args.rqst); + } catch (NoSuchTxnException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_transactional_table extends org.apache.thrift.ProcessFunction { + public add_transactional_table() { + super("add_transactional_table"); + } + + public add_transactional_table_args getEmptyArgsInstance() { + return new add_transactional_table_args(); + } + + protected boolean isOneway() { + return false; + } + + public add_transactional_table_result getResult(I iface, add_transactional_table_args args) throws org.apache.thrift.TException { + add_transactional_table_result result = new add_transactional_table_result(); + try { + iface.add_transactional_table(args.rqst); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id extends org.apache.thrift.ProcessFunction { + public allocate_table_write_id() { + super("allocate_table_write_id"); + } + + public allocate_table_write_id_args getEmptyArgsInstance() { + return new allocate_table_write_id_args(); + } + + protected boolean isOneway() { + return false; + } + + public allocate_table_write_id_result getResult(I iface, allocate_table_write_id_args args) throws org.apache.thrift.TException { + allocate_table_write_id_result result = new allocate_table_write_id_result(); + try { + result.success = iface.allocate_table_write_id(args.rqst); + } catch (NoSuchTxnException o1) { + result.o1 = o1; + } catch (TxnAbortedException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class lock extends org.apache.thrift.ProcessFunction { public lock() { super("lock"); @@ -17517,6 +17790,9 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { - public lock() { - super("lock"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids extends org.apache.thrift.AsyncProcessFunction { + public get_open_write_ids() { + super("get_open_write_ids"); } - public lock_args getEmptyArgsInstance() { - return new lock_args(); + public get_open_write_ids_args getEmptyArgsInstance() { + return new get_open_write_ids_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(LockResponse o) { - lock_result result = new lock_result(); + return new AsyncMethodCallback() { + public void onComplete(GetOpenWriteIdsResponse o) { + get_open_write_ids_result result = new get_open_write_ids_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26351,14 +26627,14 @@ public void onComplete(LockResponse o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - lock_result result = new lock_result(); + get_open_write_ids_result result = new get_open_write_ids_result(); if (e instanceof NoSuchTxnException) { result.o1 = (NoSuchTxnException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof TxnAbortedException) { - result.o2 = (TxnAbortedException) e; + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -26382,25 +26658,81 @@ protected boolean isOneway() { return false; } - public void start(I iface, lock_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.lock(args.rqst,resultHandler); + public void start(I iface, get_open_write_ids_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_open_write_ids(args.rqst,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class check_lock extends 
org.apache.thrift.AsyncProcessFunction { - public check_lock() { - super("check_lock"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_transactional_table extends org.apache.thrift.AsyncProcessFunction { + public add_transactional_table() { + super("add_transactional_table"); } - public check_lock_args getEmptyArgsInstance() { - return new check_lock_args(); + public add_transactional_table_args getEmptyArgsInstance() { + return new add_transactional_table_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(LockResponse o) { - check_lock_result result = new check_lock_result(); + return new AsyncMethodCallback() { + public void onComplete(Void o) { + add_transactional_table_result result = new add_transactional_table_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + add_transactional_table_result result = new add_transactional_table_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, add_transactional_table_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_transactional_table(args.rqst,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id extends org.apache.thrift.AsyncProcessFunction { + public allocate_table_write_id() { + super("allocate_table_write_id"); + } + + public allocate_table_write_id_args getEmptyArgsInstance() { + return new allocate_table_write_id_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(AllocateTableWriteIdResponse o) { + allocate_table_write_id_result result = new allocate_table_write_id_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26413,7 +26745,7 @@ public void onComplete(LockResponse o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - check_lock_result result = new check_lock_result(); + allocate_table_write_id_result result = new allocate_table_write_id_result(); if (e instanceof NoSuchTxnException) { result.o1 = 
(NoSuchTxnException) e; result.setO1IsSet(true); @@ -26424,8 +26756,137 @@ else if (e instanceof TxnAbortedException) { result.setO2IsSet(true); msg = result; } - else if (e instanceof NoSuchLockException) { - result.o3 = (NoSuchLockException) e; + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, allocate_table_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.allocate_table_write_id(args.rqst,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class lock extends org.apache.thrift.AsyncProcessFunction { + public lock() { + super("lock"); + } + + public lock_args getEmptyArgsInstance() { + return new lock_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(LockResponse o) { + lock_result result = new lock_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + lock_result result = new lock_result(); + if (e instanceof NoSuchTxnException) { + result.o1 = (NoSuchTxnException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof TxnAbortedException) { + result.o2 = (TxnAbortedException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, lock_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.lock(args.rqst,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class check_lock extends org.apache.thrift.AsyncProcessFunction { + public check_lock() { + super("check_lock"); + } + + public check_lock_args getEmptyArgsInstance() { + return new check_lock_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(LockResponse o) { + check_lock_result result 
= new check_lock_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + check_lock_result result = new check_lock_result(); + if (e instanceof NoSuchTxnException) { + result.o1 = (NoSuchTxnException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof TxnAbortedException) { + result.o2 = (TxnAbortedException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof NoSuchLockException) { + result.o3 = (NoSuchLockException) e; result.setO3IsSet(true); msg = result; } @@ -34145,13 +34606,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); - struct.success = new ArrayList(_list828.size); - String _elem829; - for (int _i830 = 0; _i830 < _list828.size; ++_i830) + org.apache.thrift.protocol.TList _list868 = iprot.readListBegin(); + struct.success = new ArrayList(_list868.size); + String _elem869; + for (int _i870 = 0; _i870 < _list868.size; ++_i870) { - _elem829 = iprot.readString(); - struct.success.add(_elem829); + _elem869 = iprot.readString(); + struct.success.add(_elem869); } iprot.readListEnd(); } @@ -34186,9 +34647,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter831 : struct.success) + for (String _iter871 : struct.success) { - oprot.writeString(_iter831); + oprot.writeString(_iter871); } oprot.writeListEnd(); } @@ -34227,9 +34688,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter832 : struct.success) + for (String _iter872 : struct.success) { - oprot.writeString(_iter832); + oprot.writeString(_iter872); } } } @@ -34244,13 +34705,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list833.size); - String _elem834; - for (int _i835 = 0; _i835 < _list833.size; ++_i835) + org.apache.thrift.protocol.TList _list873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list873.size); + String _elem874; + for (int _i875 = 0; _i875 < _list873.size; ++_i875) { - _elem834 = iprot.readString(); - struct.success.add(_elem834); + _elem874 = iprot.readString(); + struct.success.add(_elem874); } } struct.setSuccessIsSet(true); @@ -34904,13 +35365,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); - struct.success = new ArrayList(_list836.size); - String _elem837; - for (int _i838 
= 0; _i838 < _list836.size; ++_i838) + org.apache.thrift.protocol.TList _list876 = iprot.readListBegin(); + struct.success = new ArrayList(_list876.size); + String _elem877; + for (int _i878 = 0; _i878 < _list876.size; ++_i878) { - _elem837 = iprot.readString(); - struct.success.add(_elem837); + _elem877 = iprot.readString(); + struct.success.add(_elem877); } iprot.readListEnd(); } @@ -34945,9 +35406,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter839 : struct.success) + for (String _iter879 : struct.success) { - oprot.writeString(_iter839); + oprot.writeString(_iter879); } oprot.writeListEnd(); } @@ -34986,9 +35447,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter840 : struct.success) + for (String _iter880 : struct.success) { - oprot.writeString(_iter840); + oprot.writeString(_iter880); } } } @@ -35003,13 +35464,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list841.size); - String _elem842; - for (int _i843 = 0; _i843 < _list841.size; ++_i843) + org.apache.thrift.protocol.TList _list881 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list881.size); + String _elem882; + for (int _i883 = 0; _i883 < _list881.size; ++_i883) { - _elem842 = iprot.readString(); - struct.success.add(_elem842); + _elem882 = iprot.readString(); + struct.success.add(_elem882); } } struct.setSuccessIsSet(true); @@ -39616,16 +40077,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map844 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map844.size); - String _key845; - Type _val846; - for (int _i847 = 0; _i847 < _map844.size; ++_i847) + org.apache.thrift.protocol.TMap _map884 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map884.size); + String _key885; + Type _val886; + for (int _i887 = 0; _i887 < _map884.size; ++_i887) { - _key845 = iprot.readString(); - _val846 = new Type(); - _val846.read(iprot); - struct.success.put(_key845, _val846); + _key885 = iprot.readString(); + _val886 = new Type(); + _val886.read(iprot); + struct.success.put(_key885, _val886); } iprot.readMapEnd(); } @@ -39660,10 +40121,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter848 : struct.success.entrySet()) + for (Map.Entry _iter888 : struct.success.entrySet()) { - oprot.writeString(_iter848.getKey()); - _iter848.getValue().write(oprot); + oprot.writeString(_iter888.getKey()); + _iter888.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -39702,10 +40163,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter849 : struct.success.entrySet()) + for (Map.Entry _iter889 : struct.success.entrySet()) { - oprot.writeString(_iter849.getKey()); - _iter849.getValue().write(oprot); + oprot.writeString(_iter889.getKey()); + _iter889.getValue().write(oprot); } } } @@ -39720,16 +40181,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map850 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map850.size); - String _key851; - Type _val852; - for (int _i853 = 0; _i853 < _map850.size; ++_i853) + org.apache.thrift.protocol.TMap _map890 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map890.size); + String _key891; + Type _val892; + for (int _i893 = 0; _i893 < _map890.size; ++_i893) { - _key851 = iprot.readString(); - _val852 = new Type(); - _val852.read(iprot); - struct.success.put(_key851, _val852); + _key891 = iprot.readString(); + _val892 = new Type(); + _val892.read(iprot); + struct.success.put(_key891, _val892); } } struct.setSuccessIsSet(true); @@ -40764,14 +41225,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list854 = iprot.readListBegin(); - struct.success = new ArrayList(_list854.size); - FieldSchema _elem855; - for (int _i856 = 0; _i856 < _list854.size; ++_i856) + org.apache.thrift.protocol.TList _list894 = iprot.readListBegin(); + struct.success = new ArrayList(_list894.size); + FieldSchema _elem895; + for (int _i896 = 0; _i896 < _list894.size; ++_i896) { - _elem855 = new FieldSchema(); - _elem855.read(iprot); - struct.success.add(_elem855); + _elem895 = new FieldSchema(); + _elem895.read(iprot); + struct.success.add(_elem895); } iprot.readListEnd(); } @@ -40824,9 +41285,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter857 : struct.success) + for (FieldSchema _iter897 : struct.success) { - _iter857.write(oprot); + _iter897.write(oprot); } oprot.writeListEnd(); } @@ -40881,9 +41342,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter858 : struct.success) + for (FieldSchema _iter898 : struct.success) { - _iter858.write(oprot); + _iter898.write(oprot); } } } @@ -40904,14 +41365,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list859 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list859.size); - FieldSchema _elem860; - for (int _i861 = 0; _i861 < _list859.size; ++_i861) + org.apache.thrift.protocol.TList _list899 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list899.size); + FieldSchema _elem900; + for (int _i901 = 0; _i901 < _list899.size; ++_i901) { - _elem860 = new FieldSchema(); - _elem860.read(iprot); - struct.success.add(_elem860); + _elem900 = new FieldSchema(); + _elem900.read(iprot); + struct.success.add(_elem900); } } struct.setSuccessIsSet(true); @@ -42065,14 +42526,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list862 = iprot.readListBegin(); - struct.success = new ArrayList(_list862.size); - FieldSchema _elem863; - for (int _i864 = 0; _i864 < _list862.size; ++_i864) + org.apache.thrift.protocol.TList _list902 = iprot.readListBegin(); + struct.success = new ArrayList(_list902.size); + FieldSchema _elem903; + for (int _i904 = 0; _i904 < _list902.size; ++_i904) { - _elem863 = new FieldSchema(); - _elem863.read(iprot); - struct.success.add(_elem863); + _elem903 = new FieldSchema(); + _elem903.read(iprot); + struct.success.add(_elem903); } iprot.readListEnd(); } @@ -42125,9 +42586,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter865 : struct.success) + for (FieldSchema _iter905 : struct.success) { - _iter865.write(oprot); + _iter905.write(oprot); } oprot.writeListEnd(); } @@ -42182,9 +42643,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter866 : struct.success) + for (FieldSchema _iter906 : struct.success) { - _iter866.write(oprot); + _iter906.write(oprot); } } } @@ -42205,14 +42666,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list867 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list867.size); - FieldSchema _elem868; - for (int _i869 = 0; _i869 < _list867.size; ++_i869) + org.apache.thrift.protocol.TList _list907 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list907.size); + FieldSchema _elem908; + for (int _i909 = 0; _i909 < _list907.size; ++_i909) { - _elem868 = new FieldSchema(); - _elem868.read(iprot); - struct.success.add(_elem868); + _elem908 = new FieldSchema(); + _elem908.read(iprot); + struct.success.add(_elem908); } } struct.setSuccessIsSet(true); @@ -43257,14 +43718,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list870 = iprot.readListBegin(); - struct.success = new ArrayList(_list870.size); - FieldSchema _elem871; - for (int _i872 = 0; _i872 < _list870.size; ++_i872) + org.apache.thrift.protocol.TList _list910 = iprot.readListBegin(); + struct.success = new ArrayList(_list910.size); + FieldSchema _elem911; + for (int _i912 = 0; _i912 < _list910.size; ++_i912) { - _elem871 = new FieldSchema(); - _elem871.read(iprot); - 
struct.success.add(_elem871); + _elem911 = new FieldSchema(); + _elem911.read(iprot); + struct.success.add(_elem911); } iprot.readListEnd(); } @@ -43317,9 +43778,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter873 : struct.success) + for (FieldSchema _iter913 : struct.success) { - _iter873.write(oprot); + _iter913.write(oprot); } oprot.writeListEnd(); } @@ -43374,9 +43835,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter874 : struct.success) + for (FieldSchema _iter914 : struct.success) { - _iter874.write(oprot); + _iter914.write(oprot); } } } @@ -43397,14 +43858,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list875 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list875.size); - FieldSchema _elem876; - for (int _i877 = 0; _i877 < _list875.size; ++_i877) + org.apache.thrift.protocol.TList _list915 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list915.size); + FieldSchema _elem916; + for (int _i917 = 0; _i917 < _list915.size; ++_i917) { - _elem876 = new FieldSchema(); - _elem876.read(iprot); - struct.success.add(_elem876); + _elem916 = new FieldSchema(); + _elem916.read(iprot); + struct.success.add(_elem916); } } struct.setSuccessIsSet(true); @@ -44558,14 +45019,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list878 = iprot.readListBegin(); - struct.success = new ArrayList(_list878.size); - FieldSchema _elem879; - for (int _i880 = 0; _i880 < _list878.size; ++_i880) + org.apache.thrift.protocol.TList _list918 = iprot.readListBegin(); + struct.success = new ArrayList(_list918.size); + FieldSchema _elem919; + for (int _i920 = 0; _i920 < _list918.size; ++_i920) { - _elem879 = new FieldSchema(); - _elem879.read(iprot); - struct.success.add(_elem879); + _elem919 = new FieldSchema(); + _elem919.read(iprot); + struct.success.add(_elem919); } iprot.readListEnd(); } @@ -44618,9 +45079,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter881 : struct.success) + for (FieldSchema _iter921 : struct.success) { - _iter881.write(oprot); + _iter921.write(oprot); } oprot.writeListEnd(); } @@ -44675,9 +45136,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter882 : struct.success) + for (FieldSchema _iter922 : struct.success) { - _iter882.write(oprot); + _iter922.write(oprot); } } } @@ -44698,14 +45159,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if 
(incoming.get(0)) { { - org.apache.thrift.protocol.TList _list883 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list883.size); - FieldSchema _elem884; - for (int _i885 = 0; _i885 < _list883.size; ++_i885) + org.apache.thrift.protocol.TList _list923 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list923.size); + FieldSchema _elem924; + for (int _i925 = 0; _i925 < _list923.size; ++_i925) { - _elem884 = new FieldSchema(); - _elem884.read(iprot); - struct.success.add(_elem884); + _elem924 = new FieldSchema(); + _elem924.read(iprot); + struct.success.add(_elem924); } } struct.setSuccessIsSet(true); @@ -47632,14 +48093,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list886 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list886.size); - SQLPrimaryKey _elem887; - for (int _i888 = 0; _i888 < _list886.size; ++_i888) + org.apache.thrift.protocol.TList _list926 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list926.size); + SQLPrimaryKey _elem927; + for (int _i928 = 0; _i928 < _list926.size; ++_i928) { - _elem887 = new SQLPrimaryKey(); - _elem887.read(iprot); - struct.primaryKeys.add(_elem887); + _elem927 = new SQLPrimaryKey(); + _elem927.read(iprot); + struct.primaryKeys.add(_elem927); } iprot.readListEnd(); } @@ -47651,14 +48112,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list889 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list889.size); - SQLForeignKey _elem890; - for (int _i891 = 0; _i891 < _list889.size; ++_i891) + org.apache.thrift.protocol.TList _list929 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list929.size); + SQLForeignKey _elem930; + for (int _i931 = 0; _i931 < _list929.size; ++_i931) { - _elem890 = new SQLForeignKey(); - _elem890.read(iprot); - struct.foreignKeys.add(_elem890); + _elem930 = new SQLForeignKey(); + _elem930.read(iprot); + struct.foreignKeys.add(_elem930); } iprot.readListEnd(); } @@ -47670,14 +48131,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list892 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list892.size); - SQLUniqueConstraint _elem893; - for (int _i894 = 0; _i894 < _list892.size; ++_i894) + org.apache.thrift.protocol.TList _list932 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list932.size); + SQLUniqueConstraint _elem933; + for (int _i934 = 0; _i934 < _list932.size; ++_i934) { - _elem893 = new SQLUniqueConstraint(); - _elem893.read(iprot); - struct.uniqueConstraints.add(_elem893); + _elem933 = new SQLUniqueConstraint(); + _elem933.read(iprot); + struct.uniqueConstraints.add(_elem933); } iprot.readListEnd(); } @@ -47689,14 +48150,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list895 = iprot.readListBegin(); - 
struct.notNullConstraints = new ArrayList(_list895.size); - SQLNotNullConstraint _elem896; - for (int _i897 = 0; _i897 < _list895.size; ++_i897) + org.apache.thrift.protocol.TList _list935 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list935.size); + SQLNotNullConstraint _elem936; + for (int _i937 = 0; _i937 < _list935.size; ++_i937) { - _elem896 = new SQLNotNullConstraint(); - _elem896.read(iprot); - struct.notNullConstraints.add(_elem896); + _elem936 = new SQLNotNullConstraint(); + _elem936.read(iprot); + struct.notNullConstraints.add(_elem936); } iprot.readListEnd(); } @@ -47727,9 +48188,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter898 : struct.primaryKeys) + for (SQLPrimaryKey _iter938 : struct.primaryKeys) { - _iter898.write(oprot); + _iter938.write(oprot); } oprot.writeListEnd(); } @@ -47739,9 +48200,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter899 : struct.foreignKeys) + for (SQLForeignKey _iter939 : struct.foreignKeys) { - _iter899.write(oprot); + _iter939.write(oprot); } oprot.writeListEnd(); } @@ -47751,9 +48212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter900 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter940 : struct.uniqueConstraints) { - _iter900.write(oprot); + _iter940.write(oprot); } oprot.writeListEnd(); } @@ -47763,9 +48224,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter901 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter941 : struct.notNullConstraints) { - _iter901.write(oprot); + _iter941.write(oprot); } oprot.writeListEnd(); } @@ -47811,36 +48272,36 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter902 : struct.primaryKeys) + for (SQLPrimaryKey _iter942 : struct.primaryKeys) { - _iter902.write(oprot); + _iter942.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter903 : struct.foreignKeys) + for (SQLForeignKey _iter943 : struct.foreignKeys) { - _iter903.write(oprot); + _iter943.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter904 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter944 : struct.uniqueConstraints) { - _iter904.write(oprot); + _iter944.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter905 : 
struct.notNullConstraints) + for (SQLNotNullConstraint _iter945 : struct.notNullConstraints) { - _iter905.write(oprot); + _iter945.write(oprot); } } } @@ -47857,56 +48318,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list906 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list906.size); - SQLPrimaryKey _elem907; - for (int _i908 = 0; _i908 < _list906.size; ++_i908) + org.apache.thrift.protocol.TList _list946 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list946.size); + SQLPrimaryKey _elem947; + for (int _i948 = 0; _i948 < _list946.size; ++_i948) { - _elem907 = new SQLPrimaryKey(); - _elem907.read(iprot); - struct.primaryKeys.add(_elem907); + _elem947 = new SQLPrimaryKey(); + _elem947.read(iprot); + struct.primaryKeys.add(_elem947); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list909.size); - SQLForeignKey _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list949.size); + SQLForeignKey _elem950; + for (int _i951 = 0; _i951 < _list949.size; ++_i951) { - _elem910 = new SQLForeignKey(); - _elem910.read(iprot); - struct.foreignKeys.add(_elem910); + _elem950 = new SQLForeignKey(); + _elem950.read(iprot); + struct.foreignKeys.add(_elem950); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list912 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list912.size); - SQLUniqueConstraint _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list952 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list952.size); + SQLUniqueConstraint _elem953; + for (int _i954 = 0; _i954 < _list952.size; ++_i954) { - _elem913 = new SQLUniqueConstraint(); - _elem913.read(iprot); - struct.uniqueConstraints.add(_elem913); + _elem953 = new SQLUniqueConstraint(); + _elem953.read(iprot); + struct.uniqueConstraints.add(_elem953); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list915 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list915.size); - SQLNotNullConstraint _elem916; - for (int _i917 = 0; _i917 < _list915.size; ++_i917) + org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list955.size); + SQLNotNullConstraint _elem956; + for (int _i957 = 0; _i957 < _list955.size; ++_i957) { - _elem916 = new SQLNotNullConstraint(); - _elem916.read(iprot); - struct.notNullConstraints.add(_elem916); + _elem956 = new SQLNotNullConstraint(); + _elem956.read(iprot); + struct.notNullConstraints.add(_elem956); } } 
struct.setNotNullConstraintsIsSet(true); @@ -55398,13 +55859,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list918 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list918.size); - String _elem919; - for (int _i920 = 0; _i920 < _list918.size; ++_i920) + org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list958.size); + String _elem959; + for (int _i960 = 0; _i960 < _list958.size; ++_i960) { - _elem919 = iprot.readString(); - struct.partNames.add(_elem919); + _elem959 = iprot.readString(); + struct.partNames.add(_elem959); } iprot.readListEnd(); } @@ -55440,9 +55901,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter921 : struct.partNames) + for (String _iter961 : struct.partNames) { - oprot.writeString(_iter921); + oprot.writeString(_iter961); } oprot.writeListEnd(); } @@ -55485,9 +55946,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter922 : struct.partNames) + for (String _iter962 : struct.partNames) { - oprot.writeString(_iter922); + oprot.writeString(_iter962); } } } @@ -55507,13 +55968,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list923 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list923.size); - String _elem924; - for (int _i925 = 0; _i925 < _list923.size; ++_i925) + org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list963.size); + String _elem964; + for (int _i965 = 0; _i965 < _list963.size; ++_i965) { - _elem924 = iprot.readString(); - struct.partNames.add(_elem924); + _elem964 = iprot.readString(); + struct.partNames.add(_elem964); } } struct.setPartNamesIsSet(true); @@ -56738,13 +57199,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list926 = iprot.readListBegin(); - struct.success = new ArrayList(_list926.size); - String _elem927; - for (int _i928 = 0; _i928 < _list926.size; ++_i928) + org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); + struct.success = new ArrayList(_list966.size); + String _elem967; + for (int _i968 = 0; _i968 < _list966.size; ++_i968) { - _elem927 = iprot.readString(); - struct.success.add(_elem927); + _elem967 = iprot.readString(); + struct.success.add(_elem967); } iprot.readListEnd(); } @@ -56779,9 +57240,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter929 : struct.success) + for (String _iter969 : struct.success) { - oprot.writeString(_iter929); + 
oprot.writeString(_iter969); } oprot.writeListEnd(); } @@ -56820,9 +57281,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter930 : struct.success) + for (String _iter970 : struct.success) { - oprot.writeString(_iter930); + oprot.writeString(_iter970); } } } @@ -56837,13 +57298,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list931 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list931.size); - String _elem932; - for (int _i933 = 0; _i933 < _list931.size; ++_i933) + org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list971.size); + String _elem972; + for (int _i973 = 0; _i973 < _list971.size; ++_i973) { - _elem932 = iprot.readString(); - struct.success.add(_elem932); + _elem972 = iprot.readString(); + struct.success.add(_elem972); } } struct.setSuccessIsSet(true); @@ -57817,13 +58278,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list934 = iprot.readListBegin(); - struct.success = new ArrayList(_list934.size); - String _elem935; - for (int _i936 = 0; _i936 < _list934.size; ++_i936) + org.apache.thrift.protocol.TList _list974 = iprot.readListBegin(); + struct.success = new ArrayList(_list974.size); + String _elem975; + for (int _i976 = 0; _i976 < _list974.size; ++_i976) { - _elem935 = iprot.readString(); - struct.success.add(_elem935); + _elem975 = iprot.readString(); + struct.success.add(_elem975); } iprot.readListEnd(); } @@ -57858,9 +58319,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter937 : struct.success) + for (String _iter977 : struct.success) { - oprot.writeString(_iter937); + oprot.writeString(_iter977); } oprot.writeListEnd(); } @@ -57899,9 +58360,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter938 : struct.success) + for (String _iter978 : struct.success) { - oprot.writeString(_iter938); + oprot.writeString(_iter978); } } } @@ -57916,13 +58377,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list939.size); - String _elem940; - for (int _i941 = 0; _i941 < _list939.size; ++_i941) + org.apache.thrift.protocol.TList _list979 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list979.size); + String _elem980; + for (int _i981 = 0; _i981 < _list979.size; ++_i981) { - _elem940 = iprot.readString(); - struct.success.add(_elem940); + _elem980 = iprot.readString(); + 
struct.success.add(_elem980); } } struct.setSuccessIsSet(true); @@ -58688,13 +59149,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); - struct.success = new ArrayList(_list942.size); - String _elem943; - for (int _i944 = 0; _i944 < _list942.size; ++_i944) + org.apache.thrift.protocol.TList _list982 = iprot.readListBegin(); + struct.success = new ArrayList(_list982.size); + String _elem983; + for (int _i984 = 0; _i984 < _list982.size; ++_i984) { - _elem943 = iprot.readString(); - struct.success.add(_elem943); + _elem983 = iprot.readString(); + struct.success.add(_elem983); } iprot.readListEnd(); } @@ -58729,9 +59190,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter945 : struct.success) + for (String _iter985 : struct.success) { - oprot.writeString(_iter945); + oprot.writeString(_iter985); } oprot.writeListEnd(); } @@ -58770,9 +59231,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter946 : struct.success) + for (String _iter986 : struct.success) { - oprot.writeString(_iter946); + oprot.writeString(_iter986); } } } @@ -58787,13 +59248,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list947.size); - String _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) + org.apache.thrift.protocol.TList _list987 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list987.size); + String _elem988; + for (int _i989 = 0; _i989 < _list987.size; ++_i989) { - _elem948 = iprot.readString(); - struct.success.add(_elem948); + _elem988 = iprot.readString(); + struct.success.add(_elem988); } } struct.setSuccessIsSet(true); @@ -59298,13 +59759,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list950.size); - String _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list990 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list990.size); + String _elem991; + for (int _i992 = 0; _i992 < _list990.size; ++_i992) { - _elem951 = iprot.readString(); - struct.tbl_types.add(_elem951); + _elem991 = iprot.readString(); + struct.tbl_types.add(_elem991); } iprot.readListEnd(); } @@ -59340,9 +59801,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter953 : struct.tbl_types) + for (String _iter993 : struct.tbl_types) { - 
oprot.writeString(_iter953); + oprot.writeString(_iter993); } oprot.writeListEnd(); } @@ -59385,9 +59846,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter954 : struct.tbl_types) + for (String _iter994 : struct.tbl_types) { - oprot.writeString(_iter954); + oprot.writeString(_iter994); } } } @@ -59407,13 +59868,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list955.size); - String _elem956; - for (int _i957 = 0; _i957 < _list955.size; ++_i957) + org.apache.thrift.protocol.TList _list995 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list995.size); + String _elem996; + for (int _i997 = 0; _i997 < _list995.size; ++_i997) { - _elem956 = iprot.readString(); - struct.tbl_types.add(_elem956); + _elem996 = iprot.readString(); + struct.tbl_types.add(_elem996); } } struct.setTbl_typesIsSet(true); @@ -59819,14 +60280,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); - struct.success = new ArrayList(_list958.size); - TableMeta _elem959; - for (int _i960 = 0; _i960 < _list958.size; ++_i960) + org.apache.thrift.protocol.TList _list998 = iprot.readListBegin(); + struct.success = new ArrayList(_list998.size); + TableMeta _elem999; + for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000) { - _elem959 = new TableMeta(); - _elem959.read(iprot); - struct.success.add(_elem959); + _elem999 = new TableMeta(); + _elem999.read(iprot); + struct.success.add(_elem999); } iprot.readListEnd(); } @@ -59861,9 +60322,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter961 : struct.success) + for (TableMeta _iter1001 : struct.success) { - _iter961.write(oprot); + _iter1001.write(oprot); } oprot.writeListEnd(); } @@ -59902,9 +60363,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter962 : struct.success) + for (TableMeta _iter1002 : struct.success) { - _iter962.write(oprot); + _iter1002.write(oprot); } } } @@ -59919,14 +60380,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list963.size); - TableMeta _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list1003 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1003.size); + TableMeta _elem1004; + for (int _i1005 = 0; _i1005 < _list1003.size; ++_i1005) { - _elem964 = new TableMeta(); - 
_elem964.read(iprot); - struct.success.add(_elem964); + _elem1004 = new TableMeta(); + _elem1004.read(iprot); + struct.success.add(_elem1004); } } struct.setSuccessIsSet(true); @@ -60692,13 +61153,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); - struct.success = new ArrayList(_list966.size); - String _elem967; - for (int _i968 = 0; _i968 < _list966.size; ++_i968) + org.apache.thrift.protocol.TList _list1006 = iprot.readListBegin(); + struct.success = new ArrayList(_list1006.size); + String _elem1007; + for (int _i1008 = 0; _i1008 < _list1006.size; ++_i1008) { - _elem967 = iprot.readString(); - struct.success.add(_elem967); + _elem1007 = iprot.readString(); + struct.success.add(_elem1007); } iprot.readListEnd(); } @@ -60733,9 +61194,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter969 : struct.success) + for (String _iter1009 : struct.success) { - oprot.writeString(_iter969); + oprot.writeString(_iter1009); } oprot.writeListEnd(); } @@ -60774,9 +61235,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter970 : struct.success) + for (String _iter1010 : struct.success) { - oprot.writeString(_iter970); + oprot.writeString(_iter1010); } } } @@ -60791,13 +61252,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list971.size); - String _elem972; - for (int _i973 = 0; _i973 < _list971.size; ++_i973) + org.apache.thrift.protocol.TList _list1011 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1011.size); + String _elem1012; + for (int _i1013 = 0; _i1013 < _list1011.size; ++_i1013) { - _elem972 = iprot.readString(); - struct.success.add(_elem972); + _elem1012 = iprot.readString(); + struct.success.add(_elem1012); } } struct.setSuccessIsSet(true); @@ -62250,13 +62711,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list974 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list974.size); - String _elem975; - for (int _i976 = 0; _i976 < _list974.size; ++_i976) + org.apache.thrift.protocol.TList _list1014 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1014.size); + String _elem1015; + for (int _i1016 = 0; _i1016 < _list1014.size; ++_i1016) { - _elem975 = iprot.readString(); - struct.tbl_names.add(_elem975); + _elem1015 = iprot.readString(); + struct.tbl_names.add(_elem1015); } iprot.readListEnd(); } @@ -62287,9 +62748,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter977 : struct.tbl_names) + for (String _iter1017 : struct.tbl_names) { - oprot.writeString(_iter977); + oprot.writeString(_iter1017); } oprot.writeListEnd(); } @@ -62326,9 +62787,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter978 : struct.tbl_names) + for (String _iter1018 : struct.tbl_names) { - oprot.writeString(_iter978); + oprot.writeString(_iter1018); } } } @@ -62344,13 +62805,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list979 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list979.size); - String _elem980; - for (int _i981 = 0; _i981 < _list979.size; ++_i981) + org.apache.thrift.protocol.TList _list1019 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1019.size); + String _elem1020; + for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021) { - _elem980 = iprot.readString(); - struct.tbl_names.add(_elem980); + _elem1020 = iprot.readString(); + struct.tbl_names.add(_elem1020); } } struct.setTbl_namesIsSet(true); @@ -62675,14 +63136,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list982 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list982.size); - Table _elem983; - for (int _i984 = 0; _i984 < _list982.size; ++_i984) + org.apache.thrift.protocol.TList _list1022 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list1022.size); + Table _elem1023; + for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024) { - _elem983 = new Table(); - _elem983.read(iprot); - struct.success.add(_elem983); + _elem1023 = new Table(); + _elem1023.read(iprot); + struct.success.add(_elem1023); } iprot.readListEnd(); } @@ -62708,9 +63169,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter985 : struct.success) + for (Table _iter1025 : struct.success) { - _iter985.write(oprot); + _iter1025.write(oprot); } oprot.writeListEnd(); } @@ -62741,9 +63202,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter986 : struct.success) + for (Table _iter1026 : struct.success) { - _iter986.write(oprot); + _iter1026.write(oprot); } } } @@ -62755,14 +63216,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list987 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list987.size); - Table _elem988; - for (int _i989 = 0; _i989 < _list987.size; ++_i989) + org.apache.thrift.protocol.TList _list1027 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list1027.size); + Table _elem1028; + for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029) { - _elem988 = new Table(); - _elem988.read(iprot); - struct.success.add(_elem988); + _elem1028 = new Table(); + _elem1028.read(iprot); + struct.success.add(_elem1028); } } struct.setSuccessIsSet(true); @@ -65155,13 +65616,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list990 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list990.size); - String _elem991; - for (int _i992 = 0; _i992 < _list990.size; ++_i992) + org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1030.size); + String _elem1031; + for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032) { - _elem991 = iprot.readString(); - struct.tbl_names.add(_elem991); + _elem1031 = iprot.readString(); + struct.tbl_names.add(_elem1031); } iprot.readListEnd(); } @@ -65192,9 +65653,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter993 : struct.tbl_names) + for (String _iter1033 : struct.tbl_names) { - oprot.writeString(_iter993); + oprot.writeString(_iter1033); } oprot.writeListEnd(); } @@ -65231,9 +65692,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter994 : struct.tbl_names) + for (String _iter1034 : struct.tbl_names) { - oprot.writeString(_iter994); + oprot.writeString(_iter1034); } } } @@ -65249,13 +65710,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list995 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list995.size); - String _elem996; - for (int _i997 = 0; _i997 < _list995.size; ++_i997) + org.apache.thrift.protocol.TList _list1035 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1035.size); + String _elem1036; + for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037) { - _elem996 = iprot.readString(); - struct.tbl_names.add(_elem996); + _elem1036 = iprot.readString(); + struct.tbl_names.add(_elem1036); } } struct.setTbl_namesIsSet(true); @@ -65828,16 +66289,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map998 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map998.size); - String _key999; - Materialization _val1000; - for (int _i1001 = 0; _i1001 < _map998.size; ++_i1001) + org.apache.thrift.protocol.TMap _map1038 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1038.size); + String _key1039; + Materialization _val1040; + for (int _i1041 = 0; _i1041 < _map1038.size; ++_i1041) { - _key999 = iprot.readString(); - _val1000 = new Materialization(); - _val1000.read(iprot); - struct.success.put(_key999, _val1000); + _key1039 = iprot.readString(); + _val1040 = new Materialization(); + _val1040.read(iprot); + 
struct.success.put(_key1039, _val1040); } iprot.readMapEnd(); } @@ -65890,10 +66351,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1002 : struct.success.entrySet()) + for (Map.Entry _iter1042 : struct.success.entrySet()) { - oprot.writeString(_iter1002.getKey()); - _iter1002.getValue().write(oprot); + oprot.writeString(_iter1042.getKey()); + _iter1042.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -65948,10 +66409,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1003 : struct.success.entrySet()) + for (Map.Entry _iter1043 : struct.success.entrySet()) { - oprot.writeString(_iter1003.getKey()); - _iter1003.getValue().write(oprot); + oprot.writeString(_iter1043.getKey()); + _iter1043.getValue().write(oprot); } } } @@ -65972,16 +66433,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1004 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1004.size); - String _key1005; - Materialization _val1006; - for (int _i1007 = 0; _i1007 < _map1004.size; ++_i1007) + org.apache.thrift.protocol.TMap _map1044 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1044.size); + String _key1045; + Materialization _val1046; + for (int _i1047 = 0; _i1047 < _map1044.size; ++_i1047) { - _key1005 = iprot.readString(); - _val1006 = new Materialization(); - _val1006.read(iprot); - struct.success.put(_key1005, _val1006); + _key1045 = iprot.readString(); + _val1046 = new Materialization(); + _val1046.read(iprot); + struct.success.put(_key1045, _val1046); } } struct.setSuccessIsSet(true); @@ -67127,13 +67588,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.success = new ArrayList(_list1008.size); - String _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); + struct.success = new ArrayList(_list1048.size); + String _elem1049; + for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) { - _elem1009 = iprot.readString(); - struct.success.add(_elem1009); + _elem1049 = iprot.readString(); + struct.success.add(_elem1049); } iprot.readListEnd(); } @@ -67186,9 +67647,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1011 : struct.success) + for (String _iter1051 : struct.success) { - oprot.writeString(_iter1011); + oprot.writeString(_iter1051); } oprot.writeListEnd(); } @@ -67243,9 +67704,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1012 : struct.success) + for (String _iter1052 : struct.success) { - oprot.writeString(_iter1012); + oprot.writeString(_iter1052); } } } @@ -67266,13 +67727,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1013.size); - String _elem1014; - for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) + org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1053.size); + String _elem1054; + for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) { - _elem1014 = iprot.readString(); - struct.success.add(_elem1014); + _elem1054 = iprot.readString(); + struct.success.add(_elem1054); } } struct.setSuccessIsSet(true); @@ -73131,14 +73592,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1016.size); - Partition _elem1017; - for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) + org.apache.thrift.protocol.TList _list1056 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1056.size); + Partition _elem1057; + for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) { - _elem1017 = new Partition(); - _elem1017.read(iprot); - struct.new_parts.add(_elem1017); + _elem1057 = new Partition(); + _elem1057.read(iprot); + struct.new_parts.add(_elem1057); } iprot.readListEnd(); } @@ -73164,9 +73625,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1019 : struct.new_parts) + for (Partition _iter1059 : struct.new_parts) { - _iter1019.write(oprot); + _iter1059.write(oprot); } oprot.writeListEnd(); } @@ -73197,9 +73658,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1020 : struct.new_parts) + for (Partition _iter1060 : struct.new_parts) { - _iter1020.write(oprot); + _iter1060.write(oprot); } } } @@ -73211,14 +73672,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1021.size); - Partition _elem1022; - for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) + org.apache.thrift.protocol.TList _list1061 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1061.size); + Partition _elem1062; + for (int _i1063 = 0; _i1063 < _list1061.size; ++_i1063) { - _elem1022 = new Partition(); - _elem1022.read(iprot); - struct.new_parts.add(_elem1022); + 
_elem1062 = new Partition(); + _elem1062.read(iprot); + struct.new_parts.add(_elem1062); } } struct.setNew_partsIsSet(true); @@ -74219,14 +74680,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1024.size); - PartitionSpec _elem1025; - for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) + org.apache.thrift.protocol.TList _list1064 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1064.size); + PartitionSpec _elem1065; + for (int _i1066 = 0; _i1066 < _list1064.size; ++_i1066) { - _elem1025 = new PartitionSpec(); - _elem1025.read(iprot); - struct.new_parts.add(_elem1025); + _elem1065 = new PartitionSpec(); + _elem1065.read(iprot); + struct.new_parts.add(_elem1065); } iprot.readListEnd(); } @@ -74252,9 +74713,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1027 : struct.new_parts) + for (PartitionSpec _iter1067 : struct.new_parts) { - _iter1027.write(oprot); + _iter1067.write(oprot); } oprot.writeListEnd(); } @@ -74285,9 +74746,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1028 : struct.new_parts) + for (PartitionSpec _iter1068 : struct.new_parts) { - _iter1028.write(oprot); + _iter1068.write(oprot); } } } @@ -74299,14 +74760,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1029.size); - PartitionSpec _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1069 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1069.size); + PartitionSpec _elem1070; + for (int _i1071 = 0; _i1071 < _list1069.size; ++_i1071) { - _elem1030 = new PartitionSpec(); - _elem1030.read(iprot); - struct.new_parts.add(_elem1030); + _elem1070 = new PartitionSpec(); + _elem1070.read(iprot); + struct.new_parts.add(_elem1070); } } struct.setNew_partsIsSet(true); @@ -75482,13 +75943,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1032.size); - String _elem1033; - for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1072 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1072.size); + String _elem1073; + for (int _i1074 = 0; _i1074 < _list1072.size; ++_i1074) { - _elem1033 = iprot.readString(); - struct.part_vals.add(_elem1033); + _elem1073 = iprot.readString(); + struct.part_vals.add(_elem1073); } iprot.readListEnd(); } @@ -75524,9 +75985,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1035 : struct.part_vals) + for (String _iter1075 : struct.part_vals) { - oprot.writeString(_iter1035); + oprot.writeString(_iter1075); } oprot.writeListEnd(); } @@ -75569,9 +76030,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1036 : struct.part_vals) + for (String _iter1076 : struct.part_vals) { - oprot.writeString(_iter1036); + oprot.writeString(_iter1076); } } } @@ -75591,13 +76052,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1037.size); - String _elem1038; - for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) + org.apache.thrift.protocol.TList _list1077 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1077.size); + String _elem1078; + for (int _i1079 = 0; _i1079 < _list1077.size; ++_i1079) { - _elem1038 = iprot.readString(); - struct.part_vals.add(_elem1038); + _elem1078 = iprot.readString(); + struct.part_vals.add(_elem1078); } } struct.setPart_valsIsSet(true); @@ -77906,13 +78367,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1040.size); - String _elem1041; - for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) + org.apache.thrift.protocol.TList _list1080 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1080.size); + String _elem1081; + for (int _i1082 = 0; _i1082 < _list1080.size; ++_i1082) { - _elem1041 = iprot.readString(); - struct.part_vals.add(_elem1041); + _elem1081 = iprot.readString(); + struct.part_vals.add(_elem1081); } iprot.readListEnd(); } @@ -77957,9 +78418,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1043 : struct.part_vals) + for (String _iter1083 : struct.part_vals) { - oprot.writeString(_iter1043); + oprot.writeString(_iter1083); } oprot.writeListEnd(); } @@ -78010,9 +78471,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1044 : struct.part_vals) + for (String _iter1084 : struct.part_vals) { - oprot.writeString(_iter1044); + oprot.writeString(_iter1084); } } } @@ -78035,13 +78496,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1045.size); - String _elem1046; - for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) + 
org.apache.thrift.protocol.TList _list1085 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1085.size); + String _elem1086; + for (int _i1087 = 0; _i1087 < _list1085.size; ++_i1087) { - _elem1046 = iprot.readString(); - struct.part_vals.add(_elem1046); + _elem1086 = iprot.readString(); + struct.part_vals.add(_elem1086); } } struct.setPart_valsIsSet(true); @@ -81911,13 +82372,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1048.size); - String _elem1049; - for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) + org.apache.thrift.protocol.TList _list1088 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1088.size); + String _elem1089; + for (int _i1090 = 0; _i1090 < _list1088.size; ++_i1090) { - _elem1049 = iprot.readString(); - struct.part_vals.add(_elem1049); + _elem1089 = iprot.readString(); + struct.part_vals.add(_elem1089); } iprot.readListEnd(); } @@ -81961,9 +82422,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1051 : struct.part_vals) + for (String _iter1091 : struct.part_vals) { - oprot.writeString(_iter1051); + oprot.writeString(_iter1091); } oprot.writeListEnd(); } @@ -82012,9 +82473,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1052 : struct.part_vals) + for (String _iter1092 : struct.part_vals) { - oprot.writeString(_iter1052); + oprot.writeString(_iter1092); } } } @@ -82037,13 +82498,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1053.size); - String _elem1054; - for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) + org.apache.thrift.protocol.TList _list1093 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1093.size); + String _elem1094; + for (int _i1095 = 0; _i1095 < _list1093.size; ++_i1095) { - _elem1054 = iprot.readString(); - struct.part_vals.add(_elem1054); + _elem1094 = iprot.readString(); + struct.part_vals.add(_elem1094); } } struct.setPart_valsIsSet(true); @@ -83282,13 +83743,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1056 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1056.size); - String _elem1057; - for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) + org.apache.thrift.protocol.TList _list1096 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1096.size); + String _elem1097; + for (int _i1098 = 0; _i1098 < _list1096.size; ++_i1098) { - _elem1057 = iprot.readString(); - struct.part_vals.add(_elem1057); + _elem1097 = 
iprot.readString(); + struct.part_vals.add(_elem1097); } iprot.readListEnd(); } @@ -83341,9 +83802,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1059 : struct.part_vals) + for (String _iter1099 : struct.part_vals) { - oprot.writeString(_iter1059); + oprot.writeString(_iter1099); } oprot.writeListEnd(); } @@ -83400,9 +83861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1060 : struct.part_vals) + for (String _iter1100 : struct.part_vals) { - oprot.writeString(_iter1060); + oprot.writeString(_iter1100); } } } @@ -83428,13 +83889,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1061 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1061.size); - String _elem1062; - for (int _i1063 = 0; _i1063 < _list1061.size; ++_i1063) + org.apache.thrift.protocol.TList _list1101 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1101.size); + String _elem1102; + for (int _i1103 = 0; _i1103 < _list1101.size; ++_i1103) { - _elem1062 = iprot.readString(); - struct.part_vals.add(_elem1062); + _elem1102 = iprot.readString(); + struct.part_vals.add(_elem1102); } } struct.setPart_valsIsSet(true); @@ -88036,13 +88497,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1064 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1064.size); - String _elem1065; - for (int _i1066 = 0; _i1066 < _list1064.size; ++_i1066) + org.apache.thrift.protocol.TList _list1104 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1104.size); + String _elem1105; + for (int _i1106 = 0; _i1106 < _list1104.size; ++_i1106) { - _elem1065 = iprot.readString(); - struct.part_vals.add(_elem1065); + _elem1105 = iprot.readString(); + struct.part_vals.add(_elem1105); } iprot.readListEnd(); } @@ -88078,9 +88539,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1067 : struct.part_vals) + for (String _iter1107 : struct.part_vals) { - oprot.writeString(_iter1067); + oprot.writeString(_iter1107); } oprot.writeListEnd(); } @@ -88123,9 +88584,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1068 : struct.part_vals) + for (String _iter1108 : struct.part_vals) { - oprot.writeString(_iter1068); + oprot.writeString(_iter1108); } } } @@ -88145,13 +88606,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1069 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); - struct.part_vals = new ArrayList(_list1069.size); - String _elem1070; - for (int _i1071 = 0; _i1071 < _list1069.size; ++_i1071) + org.apache.thrift.protocol.TList _list1109 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1109.size); + String _elem1110; + for (int _i1111 = 0; _i1111 < _list1109.size; ++_i1111) { - _elem1070 = iprot.readString(); - struct.part_vals.add(_elem1070); + _elem1110 = iprot.readString(); + struct.part_vals.add(_elem1110); } } struct.setPart_valsIsSet(true); @@ -89369,15 +89830,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1072 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1072.size); - String _key1073; - String _val1074; - for (int _i1075 = 0; _i1075 < _map1072.size; ++_i1075) + org.apache.thrift.protocol.TMap _map1112 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1112.size); + String _key1113; + String _val1114; + for (int _i1115 = 0; _i1115 < _map1112.size; ++_i1115) { - _key1073 = iprot.readString(); - _val1074 = iprot.readString(); - struct.partitionSpecs.put(_key1073, _val1074); + _key1113 = iprot.readString(); + _val1114 = iprot.readString(); + struct.partitionSpecs.put(_key1113, _val1114); } iprot.readMapEnd(); } @@ -89435,10 +89896,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1076 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1116 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1076.getKey()); - oprot.writeString(_iter1076.getValue()); + oprot.writeString(_iter1116.getKey()); + oprot.writeString(_iter1116.getValue()); } oprot.writeMapEnd(); } @@ -89501,10 +89962,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1077 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1117 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1077.getKey()); - oprot.writeString(_iter1077.getValue()); + oprot.writeString(_iter1117.getKey()); + oprot.writeString(_iter1117.getValue()); } } } @@ -89528,15 +89989,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1078 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1078.size); - String _key1079; - String _val1080; - for (int _i1081 = 0; _i1081 < _map1078.size; ++_i1081) + org.apache.thrift.protocol.TMap _map1118 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1118.size); + String _key1119; + String _val1120; + for (int _i1121 = 0; _i1121 < _map1118.size; ++_i1121) { - _key1079 = iprot.readString(); - _val1080 = iprot.readString(); - 
struct.partitionSpecs.put(_key1079, _val1080); + _key1119 = iprot.readString(); + _val1120 = iprot.readString(); + struct.partitionSpecs.put(_key1119, _val1120); } } struct.setPartitionSpecsIsSet(true); @@ -90982,15 +91443,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1082 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1082.size); - String _key1083; - String _val1084; - for (int _i1085 = 0; _i1085 < _map1082.size; ++_i1085) + org.apache.thrift.protocol.TMap _map1122 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1122.size); + String _key1123; + String _val1124; + for (int _i1125 = 0; _i1125 < _map1122.size; ++_i1125) { - _key1083 = iprot.readString(); - _val1084 = iprot.readString(); - struct.partitionSpecs.put(_key1083, _val1084); + _key1123 = iprot.readString(); + _val1124 = iprot.readString(); + struct.partitionSpecs.put(_key1123, _val1124); } iprot.readMapEnd(); } @@ -91048,10 +91509,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1086 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1126 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1086.getKey()); - oprot.writeString(_iter1086.getValue()); + oprot.writeString(_iter1126.getKey()); + oprot.writeString(_iter1126.getValue()); } oprot.writeMapEnd(); } @@ -91114,10 +91575,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1087 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1127 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1087.getKey()); - oprot.writeString(_iter1087.getValue()); + oprot.writeString(_iter1127.getKey()); + oprot.writeString(_iter1127.getValue()); } } } @@ -91141,15 +91602,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1088 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1088.size); - String _key1089; - String _val1090; - for (int _i1091 = 0; _i1091 < _map1088.size; ++_i1091) + org.apache.thrift.protocol.TMap _map1128 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1128.size); + String _key1129; + String _val1130; + for (int _i1131 = 0; _i1131 < _map1128.size; ++_i1131) { - _key1089 = iprot.readString(); - _val1090 = iprot.readString(); - struct.partitionSpecs.put(_key1089, _val1090); + _key1129 = iprot.readString(); + _val1130 = iprot.readString(); + struct.partitionSpecs.put(_key1129, _val1130); } } struct.setPartitionSpecsIsSet(true); @@ -91814,14 +92275,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); - struct.success = new ArrayList(_list1092.size); - Partition _elem1093; - for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + struct.success = new ArrayList(_list1132.size); + Partition _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1093 = new Partition(); - _elem1093.read(iprot); - struct.success.add(_elem1093); + _elem1133 = new Partition(); + _elem1133.read(iprot); + struct.success.add(_elem1133); } iprot.readListEnd(); } @@ -91883,9 +92344,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1095 : struct.success) + for (Partition _iter1135 : struct.success) { - _iter1095.write(oprot); + _iter1135.write(oprot); } oprot.writeListEnd(); } @@ -91948,9 +92409,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1096 : struct.success) + for (Partition _iter1136 : struct.success) { - _iter1096.write(oprot); + _iter1136.write(oprot); } } } @@ -91974,14 +92435,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1097.size); - Partition _elem1098; - for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1137.size); + Partition _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1098 = new Partition(); - _elem1098.read(iprot); - struct.success.add(_elem1098); + _elem1138 = new Partition(); + _elem1138.read(iprot); + struct.success.add(_elem1138); } } struct.setSuccessIsSet(true); @@ -92680,13 +93141,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1100.size); - String _elem1101; - for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) + org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1140.size); + String _elem1141; + for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) { - _elem1101 = iprot.readString(); - struct.part_vals.add(_elem1101); + _elem1141 = iprot.readString(); + struct.part_vals.add(_elem1141); } iprot.readListEnd(); } @@ -92706,13 +93167,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1103 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1103.size); - String _elem1104; - for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) + org.apache.thrift.protocol.TList _list1143 = iprot.readListBegin(); + 
struct.group_names = new ArrayList(_list1143.size); + String _elem1144; + for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) { - _elem1104 = iprot.readString(); - struct.group_names.add(_elem1104); + _elem1144 = iprot.readString(); + struct.group_names.add(_elem1144); } iprot.readListEnd(); } @@ -92748,9 +93209,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1106 : struct.part_vals) + for (String _iter1146 : struct.part_vals) { - oprot.writeString(_iter1106); + oprot.writeString(_iter1146); } oprot.writeListEnd(); } @@ -92765,9 +93226,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1107 : struct.group_names) + for (String _iter1147 : struct.group_names) { - oprot.writeString(_iter1107); + oprot.writeString(_iter1147); } oprot.writeListEnd(); } @@ -92816,9 +93277,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1108 : struct.part_vals) + for (String _iter1148 : struct.part_vals) { - oprot.writeString(_iter1108); + oprot.writeString(_iter1148); } } } @@ -92828,9 +93289,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1109 : struct.group_names) + for (String _iter1149 : struct.group_names) { - oprot.writeString(_iter1109); + oprot.writeString(_iter1149); } } } @@ -92850,13 +93311,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1110 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1110.size); - String _elem1111; - for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112) + org.apache.thrift.protocol.TList _list1150 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1150.size); + String _elem1151; + for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152) { - _elem1111 = iprot.readString(); - struct.part_vals.add(_elem1111); + _elem1151 = iprot.readString(); + struct.part_vals.add(_elem1151); } } struct.setPart_valsIsSet(true); @@ -92867,13 +93328,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1113.size); - String _elem1114; - for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) + org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1153.size); + String _elem1154; + for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) { - _elem1114 = iprot.readString(); - struct.group_names.add(_elem1114); + _elem1154 = 
iprot.readString(); + struct.group_names.add(_elem1154); } } struct.setGroup_namesIsSet(true); @@ -95642,14 +96103,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); - struct.success = new ArrayList(_list1116.size); - Partition _elem1117; - for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) + org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); + struct.success = new ArrayList(_list1156.size); + Partition _elem1157; + for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) { - _elem1117 = new Partition(); - _elem1117.read(iprot); - struct.success.add(_elem1117); + _elem1157 = new Partition(); + _elem1157.read(iprot); + struct.success.add(_elem1157); } iprot.readListEnd(); } @@ -95693,9 +96154,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1119 : struct.success) + for (Partition _iter1159 : struct.success) { - _iter1119.write(oprot); + _iter1159.write(oprot); } oprot.writeListEnd(); } @@ -95742,9 +96203,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1120 : struct.success) + for (Partition _iter1160 : struct.success) { - _iter1120.write(oprot); + _iter1160.write(oprot); } } } @@ -95762,14 +96223,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1121.size); - Partition _elem1122; - for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) + org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1161.size); + Partition _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1122 = new Partition(); - _elem1122.read(iprot); - struct.success.add(_elem1122); + _elem1162 = new Partition(); + _elem1162.read(iprot); + struct.success.add(_elem1162); } } struct.setSuccessIsSet(true); @@ -96459,13 +96920,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1124.size); - String _elem1125; - for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) + org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1164.size); + String _elem1165; + for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) { - _elem1125 = iprot.readString(); - struct.group_names.add(_elem1125); + _elem1165 = iprot.readString(); + struct.group_names.add(_elem1165); } iprot.readListEnd(); } @@ -96509,9 +96970,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1127 : struct.group_names) + for (String _iter1167 : struct.group_names) { - oprot.writeString(_iter1127); + oprot.writeString(_iter1167); } oprot.writeListEnd(); } @@ -96566,9 +97027,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1128 : struct.group_names) + for (String _iter1168 : struct.group_names) { - oprot.writeString(_iter1128); + oprot.writeString(_iter1168); } } } @@ -96596,13 +97057,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1129.size); - String _elem1130; - for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) + org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1169.size); + String _elem1170; + for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) { - _elem1130 = iprot.readString(); - struct.group_names.add(_elem1130); + _elem1170 = iprot.readString(); + struct.group_names.add(_elem1170); } } struct.setGroup_namesIsSet(true); @@ -97089,14 +97550,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); - struct.success = new ArrayList(_list1132.size); - Partition _elem1133; - for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) + org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); + struct.success = new ArrayList(_list1172.size); + Partition _elem1173; + for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) { - _elem1133 = new Partition(); - _elem1133.read(iprot); - struct.success.add(_elem1133); + _elem1173 = new Partition(); + _elem1173.read(iprot); + struct.success.add(_elem1173); } iprot.readListEnd(); } @@ -97140,9 +97601,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1135 : struct.success) + for (Partition _iter1175 : struct.success) { - _iter1135.write(oprot); + _iter1175.write(oprot); } oprot.writeListEnd(); } @@ -97189,9 +97650,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1136 : struct.success) + for (Partition _iter1176 : struct.success) { - _iter1136.write(oprot); + _iter1176.write(oprot); } } } @@ -97209,14 +97670,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1137.size); - Partition _elem1138; - for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) + org.apache.thrift.protocol.TList 
_list1177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1177.size); + Partition _elem1178; + for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) { - _elem1138 = new Partition(); - _elem1138.read(iprot); - struct.success.add(_elem1138); + _elem1178 = new Partition(); + _elem1178.read(iprot); + struct.success.add(_elem1178); } } struct.setSuccessIsSet(true); @@ -98279,14 +98740,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); - struct.success = new ArrayList(_list1140.size); - PartitionSpec _elem1141; - for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) + org.apache.thrift.protocol.TList _list1180 = iprot.readListBegin(); + struct.success = new ArrayList(_list1180.size); + PartitionSpec _elem1181; + for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) { - _elem1141 = new PartitionSpec(); - _elem1141.read(iprot); - struct.success.add(_elem1141); + _elem1181 = new PartitionSpec(); + _elem1181.read(iprot); + struct.success.add(_elem1181); } iprot.readListEnd(); } @@ -98330,9 +98791,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1143 : struct.success) + for (PartitionSpec _iter1183 : struct.success) { - _iter1143.write(oprot); + _iter1183.write(oprot); } oprot.writeListEnd(); } @@ -98379,9 +98840,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1144 : struct.success) + for (PartitionSpec _iter1184 : struct.success) { - _iter1144.write(oprot); + _iter1184.write(oprot); } } } @@ -98399,14 +98860,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1145.size); - PartitionSpec _elem1146; - for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) + org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1185.size); + PartitionSpec _elem1186; + for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) { - _elem1146 = new PartitionSpec(); - _elem1146.read(iprot); - struct.success.add(_elem1146); + _elem1186 = new PartitionSpec(); + _elem1186.read(iprot); + struct.success.add(_elem1186); } } struct.setSuccessIsSet(true); @@ -99466,13 +99927,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); - struct.success = new ArrayList(_list1148.size); - String _elem1149; - for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) + org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); + struct.success = new ArrayList(_list1188.size); + String _elem1189; + for (int _i1190 = 
0; _i1190 < _list1188.size; ++_i1190) { - _elem1149 = iprot.readString(); - struct.success.add(_elem1149); + _elem1189 = iprot.readString(); + struct.success.add(_elem1189); } iprot.readListEnd(); } @@ -99516,9 +99977,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1151 : struct.success) + for (String _iter1191 : struct.success) { - oprot.writeString(_iter1151); + oprot.writeString(_iter1191); } oprot.writeListEnd(); } @@ -99565,9 +100026,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1152 : struct.success) + for (String _iter1192 : struct.success) { - oprot.writeString(_iter1152); + oprot.writeString(_iter1192); } } } @@ -99585,13 +100046,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1153.size); - String _elem1154; - for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) + org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1193.size); + String _elem1194; + for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) { - _elem1154 = iprot.readString(); - struct.success.add(_elem1154); + _elem1194 = iprot.readString(); + struct.success.add(_elem1194); } } struct.setSuccessIsSet(true); @@ -101122,13 +101583,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1156.size); - String _elem1157; - for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) + org.apache.thrift.protocol.TList _list1196 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1196.size); + String _elem1197; + for (int _i1198 = 0; _i1198 < _list1196.size; ++_i1198) { - _elem1157 = iprot.readString(); - struct.part_vals.add(_elem1157); + _elem1197 = iprot.readString(); + struct.part_vals.add(_elem1197); } iprot.readListEnd(); } @@ -101172,9 +101633,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1159 : struct.part_vals) + for (String _iter1199 : struct.part_vals) { - oprot.writeString(_iter1159); + oprot.writeString(_iter1199); } oprot.writeListEnd(); } @@ -101223,9 +101684,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1160 : struct.part_vals) + for (String _iter1200 : struct.part_vals) { - oprot.writeString(_iter1160); + oprot.writeString(_iter1200); } } } @@ -101248,13 +101709,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if 
(incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1161.size); - String _elem1162; - for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) + org.apache.thrift.protocol.TList _list1201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1201.size); + String _elem1202; + for (int _i1203 = 0; _i1203 < _list1201.size; ++_i1203) { - _elem1162 = iprot.readString(); - struct.part_vals.add(_elem1162); + _elem1202 = iprot.readString(); + struct.part_vals.add(_elem1202); } } struct.setPart_valsIsSet(true); @@ -101745,14 +102206,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1164 = iprot.readListBegin(); - struct.success = new ArrayList(_list1164.size); - Partition _elem1165; - for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) + org.apache.thrift.protocol.TList _list1204 = iprot.readListBegin(); + struct.success = new ArrayList(_list1204.size); + Partition _elem1205; + for (int _i1206 = 0; _i1206 < _list1204.size; ++_i1206) { - _elem1165 = new Partition(); - _elem1165.read(iprot); - struct.success.add(_elem1165); + _elem1205 = new Partition(); + _elem1205.read(iprot); + struct.success.add(_elem1205); } iprot.readListEnd(); } @@ -101796,9 +102257,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1167 : struct.success) + for (Partition _iter1207 : struct.success) { - _iter1167.write(oprot); + _iter1207.write(oprot); } oprot.writeListEnd(); } @@ -101845,9 +102306,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1168 : struct.success) + for (Partition _iter1208 : struct.success) { - _iter1168.write(oprot); + _iter1208.write(oprot); } } } @@ -101865,14 +102326,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1169 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1169.size); - Partition _elem1170; - for (int _i1171 = 0; _i1171 < _list1169.size; ++_i1171) + org.apache.thrift.protocol.TList _list1209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1209.size); + Partition _elem1210; + for (int _i1211 = 0; _i1211 < _list1209.size; ++_i1211) { - _elem1170 = new Partition(); - _elem1170.read(iprot); - struct.success.add(_elem1170); + _elem1210 = new Partition(); + _elem1210.read(iprot); + struct.success.add(_elem1210); } } struct.setSuccessIsSet(true); @@ -102644,13 +103105,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1172 = iprot.readListBegin(); - struct.part_vals = new 
ArrayList(_list1172.size); - String _elem1173; - for (int _i1174 = 0; _i1174 < _list1172.size; ++_i1174) + org.apache.thrift.protocol.TList _list1212 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1212.size); + String _elem1213; + for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) { - _elem1173 = iprot.readString(); - struct.part_vals.add(_elem1173); + _elem1213 = iprot.readString(); + struct.part_vals.add(_elem1213); } iprot.readListEnd(); } @@ -102678,13 +103139,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1175 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1175.size); - String _elem1176; - for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) + org.apache.thrift.protocol.TList _list1215 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1215.size); + String _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1176 = iprot.readString(); - struct.group_names.add(_elem1176); + _elem1216 = iprot.readString(); + struct.group_names.add(_elem1216); } iprot.readListEnd(); } @@ -102720,9 +103181,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1178 : struct.part_vals) + for (String _iter1218 : struct.part_vals) { - oprot.writeString(_iter1178); + oprot.writeString(_iter1218); } oprot.writeListEnd(); } @@ -102740,9 +103201,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1179 : struct.group_names) + for (String _iter1219 : struct.group_names) { - oprot.writeString(_iter1179); + oprot.writeString(_iter1219); } oprot.writeListEnd(); } @@ -102794,9 +103255,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1180 : struct.part_vals) + for (String _iter1220 : struct.part_vals) { - oprot.writeString(_iter1180); + oprot.writeString(_iter1220); } } } @@ -102809,9 +103270,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1181 : struct.group_names) + for (String _iter1221 : struct.group_names) { - oprot.writeString(_iter1181); + oprot.writeString(_iter1221); } } } @@ -102831,13 +103292,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1182 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1182.size); - String _elem1183; - for (int _i1184 = 0; _i1184 < _list1182.size; ++_i1184) + org.apache.thrift.protocol.TList _list1222 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1222.size); + String _elem1223; + for (int _i1224 = 0; _i1224 < _list1222.size; ++_i1224) { - 
_elem1183 = iprot.readString(); - struct.part_vals.add(_elem1183); + _elem1223 = iprot.readString(); + struct.part_vals.add(_elem1223); } } struct.setPart_valsIsSet(true); @@ -102852,13 +103313,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1185.size); - String _elem1186; - for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) + org.apache.thrift.protocol.TList _list1225 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1225.size); + String _elem1226; + for (int _i1227 = 0; _i1227 < _list1225.size; ++_i1227) { - _elem1186 = iprot.readString(); - struct.group_names.add(_elem1186); + _elem1226 = iprot.readString(); + struct.group_names.add(_elem1226); } } struct.setGroup_namesIsSet(true); @@ -103345,14 +103806,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1188 = iprot.readListBegin(); - struct.success = new ArrayList(_list1188.size); - Partition _elem1189; - for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) + org.apache.thrift.protocol.TList _list1228 = iprot.readListBegin(); + struct.success = new ArrayList(_list1228.size); + Partition _elem1229; + for (int _i1230 = 0; _i1230 < _list1228.size; ++_i1230) { - _elem1189 = new Partition(); - _elem1189.read(iprot); - struct.success.add(_elem1189); + _elem1229 = new Partition(); + _elem1229.read(iprot); + struct.success.add(_elem1229); } iprot.readListEnd(); } @@ -103396,9 +103857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1191 : struct.success) + for (Partition _iter1231 : struct.success) { - _iter1191.write(oprot); + _iter1231.write(oprot); } oprot.writeListEnd(); } @@ -103445,9 +103906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1192 : struct.success) + for (Partition _iter1232 : struct.success) { - _iter1192.write(oprot); + _iter1232.write(oprot); } } } @@ -103465,14 +103926,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1193.size); - Partition _elem1194; - for (int _i1195 = 0; _i1195 < _list1193.size; ++_i1195) + org.apache.thrift.protocol.TList _list1233 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1233.size); + Partition _elem1234; + for (int _i1235 = 0; _i1235 < _list1233.size; ++_i1235) { - _elem1194 = new Partition(); - _elem1194.read(iprot); - struct.success.add(_elem1194); + _elem1234 = new Partition(); + _elem1234.read(iprot); + struct.success.add(_elem1234); } } struct.setSuccessIsSet(true); @@ 
-104065,13 +104526,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1196 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1196.size); - String _elem1197; - for (int _i1198 = 0; _i1198 < _list1196.size; ++_i1198) + org.apache.thrift.protocol.TList _list1236 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1236.size); + String _elem1237; + for (int _i1238 = 0; _i1238 < _list1236.size; ++_i1238) { - _elem1197 = iprot.readString(); - struct.part_vals.add(_elem1197); + _elem1237 = iprot.readString(); + struct.part_vals.add(_elem1237); } iprot.readListEnd(); } @@ -104115,9 +104576,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1199 : struct.part_vals) + for (String _iter1239 : struct.part_vals) { - oprot.writeString(_iter1199); + oprot.writeString(_iter1239); } oprot.writeListEnd(); } @@ -104166,9 +104627,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1200 : struct.part_vals) + for (String _iter1240 : struct.part_vals) { - oprot.writeString(_iter1200); + oprot.writeString(_iter1240); } } } @@ -104191,13 +104652,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1201.size); - String _elem1202; - for (int _i1203 = 0; _i1203 < _list1201.size; ++_i1203) + org.apache.thrift.protocol.TList _list1241 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1241.size); + String _elem1242; + for (int _i1243 = 0; _i1243 < _list1241.size; ++_i1243) { - _elem1202 = iprot.readString(); - struct.part_vals.add(_elem1202); + _elem1242 = iprot.readString(); + struct.part_vals.add(_elem1242); } } struct.setPart_valsIsSet(true); @@ -104685,13 +105146,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1204 = iprot.readListBegin(); - struct.success = new ArrayList(_list1204.size); - String _elem1205; - for (int _i1206 = 0; _i1206 < _list1204.size; ++_i1206) + org.apache.thrift.protocol.TList _list1244 = iprot.readListBegin(); + struct.success = new ArrayList(_list1244.size); + String _elem1245; + for (int _i1246 = 0; _i1246 < _list1244.size; ++_i1246) { - _elem1205 = iprot.readString(); - struct.success.add(_elem1205); + _elem1245 = iprot.readString(); + struct.success.add(_elem1245); } iprot.readListEnd(); } @@ -104735,9 +105196,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1207 : struct.success) + for (String _iter1247 : struct.success) { - 
oprot.writeString(_iter1207); + oprot.writeString(_iter1247); } oprot.writeListEnd(); } @@ -104784,9 +105245,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1208 : struct.success) + for (String _iter1248 : struct.success) { - oprot.writeString(_iter1208); + oprot.writeString(_iter1248); } } } @@ -104804,13 +105265,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1209.size); - String _elem1210; - for (int _i1211 = 0; _i1211 < _list1209.size; ++_i1211) + org.apache.thrift.protocol.TList _list1249 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1249.size); + String _elem1250; + for (int _i1251 = 0; _i1251 < _list1249.size; ++_i1251) { - _elem1210 = iprot.readString(); - struct.success.add(_elem1210); + _elem1250 = iprot.readString(); + struct.success.add(_elem1250); } } struct.setSuccessIsSet(true); @@ -105977,14 +106438,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1212 = iprot.readListBegin(); - struct.success = new ArrayList(_list1212.size); - Partition _elem1213; - for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) + org.apache.thrift.protocol.TList _list1252 = iprot.readListBegin(); + struct.success = new ArrayList(_list1252.size); + Partition _elem1253; + for (int _i1254 = 0; _i1254 < _list1252.size; ++_i1254) { - _elem1213 = new Partition(); - _elem1213.read(iprot); - struct.success.add(_elem1213); + _elem1253 = new Partition(); + _elem1253.read(iprot); + struct.success.add(_elem1253); } iprot.readListEnd(); } @@ -106028,9 +106489,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1215 : struct.success) + for (Partition _iter1255 : struct.success) { - _iter1215.write(oprot); + _iter1255.write(oprot); } oprot.writeListEnd(); } @@ -106077,9 +106538,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1216 : struct.success) + for (Partition _iter1256 : struct.success) { - _iter1216.write(oprot); + _iter1256.write(oprot); } } } @@ -106097,14 +106558,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1217 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1217.size); - Partition _elem1218; - for (int _i1219 = 0; _i1219 < _list1217.size; ++_i1219) + org.apache.thrift.protocol.TList _list1257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1257.size); + Partition _elem1258; + for (int 
_i1259 = 0; _i1259 < _list1257.size; ++_i1259) { - _elem1218 = new Partition(); - _elem1218.read(iprot); - struct.success.add(_elem1218); + _elem1258 = new Partition(); + _elem1258.read(iprot); + struct.success.add(_elem1258); } } struct.setSuccessIsSet(true); @@ -107271,14 +107732,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1220 = iprot.readListBegin(); - struct.success = new ArrayList(_list1220.size); - PartitionSpec _elem1221; - for (int _i1222 = 0; _i1222 < _list1220.size; ++_i1222) + org.apache.thrift.protocol.TList _list1260 = iprot.readListBegin(); + struct.success = new ArrayList(_list1260.size); + PartitionSpec _elem1261; + for (int _i1262 = 0; _i1262 < _list1260.size; ++_i1262) { - _elem1221 = new PartitionSpec(); - _elem1221.read(iprot); - struct.success.add(_elem1221); + _elem1261 = new PartitionSpec(); + _elem1261.read(iprot); + struct.success.add(_elem1261); } iprot.readListEnd(); } @@ -107322,9 +107783,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1223 : struct.success) + for (PartitionSpec _iter1263 : struct.success) { - _iter1223.write(oprot); + _iter1263.write(oprot); } oprot.writeListEnd(); } @@ -107371,9 +107832,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1224 : struct.success) + for (PartitionSpec _iter1264 : struct.success) { - _iter1224.write(oprot); + _iter1264.write(oprot); } } } @@ -107391,14 +107852,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1225 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1225.size); - PartitionSpec _elem1226; - for (int _i1227 = 0; _i1227 < _list1225.size; ++_i1227) + org.apache.thrift.protocol.TList _list1265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1265.size); + PartitionSpec _elem1266; + for (int _i1267 = 0; _i1267 < _list1265.size; ++_i1267) { - _elem1226 = new PartitionSpec(); - _elem1226.read(iprot); - struct.success.add(_elem1226); + _elem1266 = new PartitionSpec(); + _elem1266.read(iprot); + struct.success.add(_elem1266); } } struct.setSuccessIsSet(true); @@ -109982,13 +110443,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1228 = iprot.readListBegin(); - struct.names = new ArrayList(_list1228.size); - String _elem1229; - for (int _i1230 = 0; _i1230 < _list1228.size; ++_i1230) + org.apache.thrift.protocol.TList _list1268 = iprot.readListBegin(); + struct.names = new ArrayList(_list1268.size); + String _elem1269; + for (int _i1270 = 0; _i1270 < _list1268.size; ++_i1270) { - _elem1229 = iprot.readString(); - struct.names.add(_elem1229); + _elem1269 = iprot.readString(); + struct.names.add(_elem1269); } 
iprot.readListEnd(); } @@ -110024,9 +110485,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1231 : struct.names) + for (String _iter1271 : struct.names) { - oprot.writeString(_iter1231); + oprot.writeString(_iter1271); } oprot.writeListEnd(); } @@ -110069,9 +110530,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1232 : struct.names) + for (String _iter1272 : struct.names) { - oprot.writeString(_iter1232); + oprot.writeString(_iter1272); } } } @@ -110091,13 +110552,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1233 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1233.size); - String _elem1234; - for (int _i1235 = 0; _i1235 < _list1233.size; ++_i1235) + org.apache.thrift.protocol.TList _list1273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1273.size); + String _elem1274; + for (int _i1275 = 0; _i1275 < _list1273.size; ++_i1275) { - _elem1234 = iprot.readString(); - struct.names.add(_elem1234); + _elem1274 = iprot.readString(); + struct.names.add(_elem1274); } } struct.setNamesIsSet(true); @@ -110584,14 +111045,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1236 = iprot.readListBegin(); - struct.success = new ArrayList(_list1236.size); - Partition _elem1237; - for (int _i1238 = 0; _i1238 < _list1236.size; ++_i1238) + org.apache.thrift.protocol.TList _list1276 = iprot.readListBegin(); + struct.success = new ArrayList(_list1276.size); + Partition _elem1277; + for (int _i1278 = 0; _i1278 < _list1276.size; ++_i1278) { - _elem1237 = new Partition(); - _elem1237.read(iprot); - struct.success.add(_elem1237); + _elem1277 = new Partition(); + _elem1277.read(iprot); + struct.success.add(_elem1277); } iprot.readListEnd(); } @@ -110635,9 +111096,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1239 : struct.success) + for (Partition _iter1279 : struct.success) { - _iter1239.write(oprot); + _iter1279.write(oprot); } oprot.writeListEnd(); } @@ -110684,9 +111145,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1240 : struct.success) + for (Partition _iter1280 : struct.success) { - _iter1240.write(oprot); + _iter1280.write(oprot); } } } @@ -110704,14 +111165,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1241 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new 
ArrayList(_list1241.size); - Partition _elem1242; - for (int _i1243 = 0; _i1243 < _list1241.size; ++_i1243) + org.apache.thrift.protocol.TList _list1281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1281.size); + Partition _elem1282; + for (int _i1283 = 0; _i1283 < _list1281.size; ++_i1283) { - _elem1242 = new Partition(); - _elem1242.read(iprot); - struct.success.add(_elem1242); + _elem1282 = new Partition(); + _elem1282.read(iprot); + struct.success.add(_elem1282); } } struct.setSuccessIsSet(true); @@ -112261,14 +112722,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1244 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1244.size); - Partition _elem1245; - for (int _i1246 = 0; _i1246 < _list1244.size; ++_i1246) + org.apache.thrift.protocol.TList _list1284 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1284.size); + Partition _elem1285; + for (int _i1286 = 0; _i1286 < _list1284.size; ++_i1286) { - _elem1245 = new Partition(); - _elem1245.read(iprot); - struct.new_parts.add(_elem1245); + _elem1285 = new Partition(); + _elem1285.read(iprot); + struct.new_parts.add(_elem1285); } iprot.readListEnd(); } @@ -112304,9 +112765,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1247 : struct.new_parts) + for (Partition _iter1287 : struct.new_parts) { - _iter1247.write(oprot); + _iter1287.write(oprot); } oprot.writeListEnd(); } @@ -112349,9 +112810,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1248 : struct.new_parts) + for (Partition _iter1288 : struct.new_parts) { - _iter1248.write(oprot); + _iter1288.write(oprot); } } } @@ -112371,14 +112832,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1249 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1249.size); - Partition _elem1250; - for (int _i1251 = 0; _i1251 < _list1249.size; ++_i1251) + org.apache.thrift.protocol.TList _list1289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1289.size); + Partition _elem1290; + for (int _i1291 = 0; _i1291 < _list1289.size; ++_i1291) { - _elem1250 = new Partition(); - _elem1250.read(iprot); - struct.new_parts.add(_elem1250); + _elem1290 = new Partition(); + _elem1290.read(iprot); + struct.new_parts.add(_elem1290); } } struct.setNew_partsIsSet(true); @@ -113431,14 +113892,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1252 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1252.size); - Partition _elem1253; - for (int _i1254 = 0; _i1254 < _list1252.size; ++_i1254) + org.apache.thrift.protocol.TList _list1292 = 
iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1292.size); + Partition _elem1293; + for (int _i1294 = 0; _i1294 < _list1292.size; ++_i1294) { - _elem1253 = new Partition(); - _elem1253.read(iprot); - struct.new_parts.add(_elem1253); + _elem1293 = new Partition(); + _elem1293.read(iprot); + struct.new_parts.add(_elem1293); } iprot.readListEnd(); } @@ -113483,9 +113944,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1255 : struct.new_parts) + for (Partition _iter1295 : struct.new_parts) { - _iter1255.write(oprot); + _iter1295.write(oprot); } oprot.writeListEnd(); } @@ -113536,9 +113997,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1256 : struct.new_parts) + for (Partition _iter1296 : struct.new_parts) { - _iter1256.write(oprot); + _iter1296.write(oprot); } } } @@ -113561,14 +114022,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1257.size); - Partition _elem1258; - for (int _i1259 = 0; _i1259 < _list1257.size; ++_i1259) + org.apache.thrift.protocol.TList _list1297 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1297.size); + Partition _elem1298; + for (int _i1299 = 0; _i1299 < _list1297.size; ++_i1299) { - _elem1258 = new Partition(); - _elem1258.read(iprot); - struct.new_parts.add(_elem1258); + _elem1298 = new Partition(); + _elem1298.read(iprot); + struct.new_parts.add(_elem1298); } } struct.setNew_partsIsSet(true); @@ -115769,13 +116230,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1260 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1260.size); - String _elem1261; - for (int _i1262 = 0; _i1262 < _list1260.size; ++_i1262) + org.apache.thrift.protocol.TList _list1300 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1300.size); + String _elem1301; + for (int _i1302 = 0; _i1302 < _list1300.size; ++_i1302) { - _elem1261 = iprot.readString(); - struct.part_vals.add(_elem1261); + _elem1301 = iprot.readString(); + struct.part_vals.add(_elem1301); } iprot.readListEnd(); } @@ -115820,9 +116281,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1263 : struct.part_vals) + for (String _iter1303 : struct.part_vals) { - oprot.writeString(_iter1263); + oprot.writeString(_iter1303); } oprot.writeListEnd(); } @@ -115873,9 +116334,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1264 : struct.part_vals) + for (String _iter1304 : struct.part_vals) { 
- oprot.writeString(_iter1264); + oprot.writeString(_iter1304); } } } @@ -115898,13 +116359,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1265.size); - String _elem1266; - for (int _i1267 = 0; _i1267 < _list1265.size; ++_i1267) + org.apache.thrift.protocol.TList _list1305 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1305.size); + String _elem1306; + for (int _i1307 = 0; _i1307 < _list1305.size; ++_i1307) { - _elem1266 = iprot.readString(); - struct.part_vals.add(_elem1266); + _elem1306 = iprot.readString(); + struct.part_vals.add(_elem1306); } } struct.setPart_valsIsSet(true); @@ -116778,13 +117239,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1268 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1268.size); - String _elem1269; - for (int _i1270 = 0; _i1270 < _list1268.size; ++_i1270) + org.apache.thrift.protocol.TList _list1308 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1308.size); + String _elem1309; + for (int _i1310 = 0; _i1310 < _list1308.size; ++_i1310) { - _elem1269 = iprot.readString(); - struct.part_vals.add(_elem1269); + _elem1309 = iprot.readString(); + struct.part_vals.add(_elem1309); } iprot.readListEnd(); } @@ -116818,9 +117279,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1271 : struct.part_vals) + for (String _iter1311 : struct.part_vals) { - oprot.writeString(_iter1271); + oprot.writeString(_iter1311); } oprot.writeListEnd(); } @@ -116857,9 +117318,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1272 : struct.part_vals) + for (String _iter1312 : struct.part_vals) { - oprot.writeString(_iter1272); + oprot.writeString(_iter1312); } } } @@ -116874,13 +117335,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1273.size); - String _elem1274; - for (int _i1275 = 0; _i1275 < _list1273.size; ++_i1275) + org.apache.thrift.protocol.TList _list1313 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1313.size); + String _elem1314; + for (int _i1315 = 0; _i1315 < _list1313.size; ++_i1315) { - _elem1274 = iprot.readString(); - struct.part_vals.add(_elem1274); + _elem1314 = iprot.readString(); + struct.part_vals.add(_elem1314); } } struct.setPart_valsIsSet(true); @@ -119035,13 +119496,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1276 = iprot.readListBegin(); - struct.success = new ArrayList(_list1276.size); - String _elem1277; - for (int _i1278 = 0; _i1278 < _list1276.size; ++_i1278) + org.apache.thrift.protocol.TList _list1316 = iprot.readListBegin(); + struct.success = new ArrayList(_list1316.size); + String _elem1317; + for (int _i1318 = 0; _i1318 < _list1316.size; ++_i1318) { - _elem1277 = iprot.readString(); - struct.success.add(_elem1277); + _elem1317 = iprot.readString(); + struct.success.add(_elem1317); } iprot.readListEnd(); } @@ -119076,9 +119537,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1279 : struct.success) + for (String _iter1319 : struct.success) { - oprot.writeString(_iter1279); + oprot.writeString(_iter1319); } oprot.writeListEnd(); } @@ -119117,9 +119578,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1280 : struct.success) + for (String _iter1320 : struct.success) { - oprot.writeString(_iter1280); + oprot.writeString(_iter1320); } } } @@ -119134,13 +119595,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1281.size); - String _elem1282; - for (int _i1283 = 0; _i1283 < _list1281.size; ++_i1283) + org.apache.thrift.protocol.TList _list1321 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1321.size); + String _elem1322; + for (int _i1323 = 0; _i1323 < _list1321.size; ++_i1323) { - _elem1282 = iprot.readString(); - struct.success.add(_elem1282); + _elem1322 = iprot.readString(); + struct.success.add(_elem1322); } } struct.setSuccessIsSet(true); @@ -119903,15 +120364,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1284 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1284.size); - String _key1285; - String _val1286; - for (int _i1287 = 0; _i1287 < _map1284.size; ++_i1287) + org.apache.thrift.protocol.TMap _map1324 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1324.size); + String _key1325; + String _val1326; + for (int _i1327 = 0; _i1327 < _map1324.size; ++_i1327) { - _key1285 = iprot.readString(); - _val1286 = iprot.readString(); - struct.success.put(_key1285, _val1286); + _key1325 = iprot.readString(); + _val1326 = iprot.readString(); + struct.success.put(_key1325, _val1326); } iprot.readMapEnd(); } @@ -119946,10 +120407,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1288 : struct.success.entrySet()) + for (Map.Entry _iter1328 : 
struct.success.entrySet()) { - oprot.writeString(_iter1288.getKey()); - oprot.writeString(_iter1288.getValue()); + oprot.writeString(_iter1328.getKey()); + oprot.writeString(_iter1328.getValue()); } oprot.writeMapEnd(); } @@ -119988,10 +120449,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1289 : struct.success.entrySet()) + for (Map.Entry _iter1329 : struct.success.entrySet()) { - oprot.writeString(_iter1289.getKey()); - oprot.writeString(_iter1289.getValue()); + oprot.writeString(_iter1329.getKey()); + oprot.writeString(_iter1329.getValue()); } } } @@ -120006,15 +120467,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1290 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1290.size); - String _key1291; - String _val1292; - for (int _i1293 = 0; _i1293 < _map1290.size; ++_i1293) + org.apache.thrift.protocol.TMap _map1330 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1330.size); + String _key1331; + String _val1332; + for (int _i1333 = 0; _i1333 < _map1330.size; ++_i1333) { - _key1291 = iprot.readString(); - _val1292 = iprot.readString(); - struct.success.put(_key1291, _val1292); + _key1331 = iprot.readString(); + _val1332 = iprot.readString(); + struct.success.put(_key1331, _val1332); } } struct.setSuccessIsSet(true); @@ -120609,15 +121070,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1294 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1294.size); - String _key1295; - String _val1296; - for (int _i1297 = 0; _i1297 < _map1294.size; ++_i1297) + org.apache.thrift.protocol.TMap _map1334 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1334.size); + String _key1335; + String _val1336; + for (int _i1337 = 0; _i1337 < _map1334.size; ++_i1337) { - _key1295 = iprot.readString(); - _val1296 = iprot.readString(); - struct.part_vals.put(_key1295, _val1296); + _key1335 = iprot.readString(); + _val1336 = iprot.readString(); + struct.part_vals.put(_key1335, _val1336); } iprot.readMapEnd(); } @@ -120661,10 +121122,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1298 : struct.part_vals.entrySet()) + for (Map.Entry _iter1338 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1298.getKey()); - oprot.writeString(_iter1298.getValue()); + oprot.writeString(_iter1338.getKey()); + oprot.writeString(_iter1338.getValue()); } oprot.writeMapEnd(); } @@ -120715,10 +121176,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1299 : struct.part_vals.entrySet()) + for (Map.Entry _iter1339 : 
struct.part_vals.entrySet()) { - oprot.writeString(_iter1299.getKey()); - oprot.writeString(_iter1299.getValue()); + oprot.writeString(_iter1339.getKey()); + oprot.writeString(_iter1339.getValue()); } } } @@ -120741,15 +121202,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1300 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1300.size); - String _key1301; - String _val1302; - for (int _i1303 = 0; _i1303 < _map1300.size; ++_i1303) + org.apache.thrift.protocol.TMap _map1340 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1340.size); + String _key1341; + String _val1342; + for (int _i1343 = 0; _i1343 < _map1340.size; ++_i1343) { - _key1301 = iprot.readString(); - _val1302 = iprot.readString(); - struct.part_vals.put(_key1301, _val1302); + _key1341 = iprot.readString(); + _val1342 = iprot.readString(); + struct.part_vals.put(_key1341, _val1342); } } struct.setPart_valsIsSet(true); @@ -122233,15 +122694,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1304 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1304.size); - String _key1305; - String _val1306; - for (int _i1307 = 0; _i1307 < _map1304.size; ++_i1307) + org.apache.thrift.protocol.TMap _map1344 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1344.size); + String _key1345; + String _val1346; + for (int _i1347 = 0; _i1347 < _map1344.size; ++_i1347) { - _key1305 = iprot.readString(); - _val1306 = iprot.readString(); - struct.part_vals.put(_key1305, _val1306); + _key1345 = iprot.readString(); + _val1346 = iprot.readString(); + struct.part_vals.put(_key1345, _val1346); } iprot.readMapEnd(); } @@ -122285,10 +122746,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1308 : struct.part_vals.entrySet()) + for (Map.Entry _iter1348 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1308.getKey()); - oprot.writeString(_iter1308.getValue()); + oprot.writeString(_iter1348.getKey()); + oprot.writeString(_iter1348.getValue()); } oprot.writeMapEnd(); } @@ -122339,10 +122800,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1309 : struct.part_vals.entrySet()) + for (Map.Entry _iter1349 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1309.getKey()); - oprot.writeString(_iter1309.getValue()); + oprot.writeString(_iter1349.getKey()); + oprot.writeString(_iter1349.getValue()); } } } @@ -122365,15 +122826,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1310 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - 
struct.part_vals = new HashMap(2*_map1310.size); - String _key1311; - String _val1312; - for (int _i1313 = 0; _i1313 < _map1310.size; ++_i1313) + org.apache.thrift.protocol.TMap _map1350 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1350.size); + String _key1351; + String _val1352; + for (int _i1353 = 0; _i1353 < _map1350.size; ++_i1353) { - _key1311 = iprot.readString(); - _val1312 = iprot.readString(); - struct.part_vals.put(_key1311, _val1312); + _key1351 = iprot.readString(); + _val1352 = iprot.readString(); + struct.part_vals.put(_key1351, _val1352); } } struct.setPart_valsIsSet(true); @@ -129097,14 +129558,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); - struct.success = new ArrayList(_list1314.size); - Index _elem1315; - for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) + org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin(); + struct.success = new ArrayList(_list1354.size); + Index _elem1355; + for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356) { - _elem1315 = new Index(); - _elem1315.read(iprot); - struct.success.add(_elem1315); + _elem1355 = new Index(); + _elem1355.read(iprot); + struct.success.add(_elem1355); } iprot.readListEnd(); } @@ -129148,9 +129609,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter1317 : struct.success) + for (Index _iter1357 : struct.success) { - _iter1317.write(oprot); + _iter1357.write(oprot); } oprot.writeListEnd(); } @@ -129197,9 +129658,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter1318 : struct.success) + for (Index _iter1358 : struct.success) { - _iter1318.write(oprot); + _iter1358.write(oprot); } } } @@ -129217,14 +129678,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1319.size); - Index _elem1320; - for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) + org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1359.size); + Index _elem1360; + for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361) { - _elem1320 = new Index(); - _elem1320.read(iprot); - struct.success.add(_elem1320); + _elem1360 = new Index(); + _elem1360.read(iprot); + struct.success.add(_elem1360); } } struct.setSuccessIsSet(true); @@ -130203,13 +130664,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); - struct.success = new ArrayList(_list1322.size); - String _elem1323; - for (int _i1324 = 0; _i1324 
< _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin(); + struct.success = new ArrayList(_list1362.size); + String _elem1363; + for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364) { - _elem1323 = iprot.readString(); - struct.success.add(_elem1323); + _elem1363 = iprot.readString(); + struct.success.add(_elem1363); } iprot.readListEnd(); } @@ -130244,9 +130705,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1325 : struct.success) + for (String _iter1365 : struct.success) { - oprot.writeString(_iter1325); + oprot.writeString(_iter1365); } oprot.writeListEnd(); } @@ -130285,9 +130746,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1326 : struct.success) + for (String _iter1366 : struct.success) { - oprot.writeString(_iter1326); + oprot.writeString(_iter1366); } } } @@ -130302,13 +130763,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1327.size); - String _elem1328; - for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) + org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1367.size); + String _elem1368; + for (int _i1369 = 0; _i1369 < _list1367.size; ++_i1369) { - _elem1328 = iprot.readString(); - struct.success.add(_elem1328); + _elem1368 = iprot.readString(); + struct.success.add(_elem1368); } } struct.setSuccessIsSet(true); @@ -149795,13 +150256,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); - struct.success = new ArrayList(_list1330.size); - String _elem1331; - for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) + org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); + struct.success = new ArrayList(_list1370.size); + String _elem1371; + for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) { - _elem1331 = iprot.readString(); - struct.success.add(_elem1331); + _elem1371 = iprot.readString(); + struct.success.add(_elem1371); } iprot.readListEnd(); } @@ -149836,9 +150297,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1333 : struct.success) + for (String _iter1373 : struct.success) { - oprot.writeString(_iter1333); + oprot.writeString(_iter1373); } oprot.writeListEnd(); } @@ -149877,9 +150338,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1334 : struct.success) + for (String _iter1374 : struct.success) { - 
oprot.writeString(_iter1334); + oprot.writeString(_iter1374); } } } @@ -149894,13 +150355,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1335.size); - String _elem1336; - for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) + org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1375.size); + String _elem1376; + for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) { - _elem1336 = iprot.readString(); - struct.success.add(_elem1336); + _elem1376 = iprot.readString(); + struct.success.add(_elem1376); } } struct.setSuccessIsSet(true); @@ -153955,13 +154416,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); - struct.success = new ArrayList(_list1338.size); - String _elem1339; - for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) + org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); + struct.success = new ArrayList(_list1378.size); + String _elem1379; + for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) { - _elem1339 = iprot.readString(); - struct.success.add(_elem1339); + _elem1379 = iprot.readString(); + struct.success.add(_elem1379); } iprot.readListEnd(); } @@ -153996,9 +154457,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1341 : struct.success) + for (String _iter1381 : struct.success) { - oprot.writeString(_iter1341); + oprot.writeString(_iter1381); } oprot.writeListEnd(); } @@ -154037,9 +154498,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1342 : struct.success) + for (String _iter1382 : struct.success) { - oprot.writeString(_iter1342); + oprot.writeString(_iter1382); } } } @@ -154054,13 +154515,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1343.size); - String _elem1344; - for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) + org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1383.size); + String _elem1384; + for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) { - _elem1344 = iprot.readString(); - struct.success.add(_elem1344); + _elem1384 = iprot.readString(); + struct.success.add(_elem1384); } } struct.setSuccessIsSet(true); @@ -157351,14 +157812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin(); - struct.success = new ArrayList(_list1346.size); - Role _elem1347; - for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348) + org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); + struct.success = new ArrayList(_list1386.size); + Role _elem1387; + for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) { - _elem1347 = new Role(); - _elem1347.read(iprot); - struct.success.add(_elem1347); + _elem1387 = new Role(); + _elem1387.read(iprot); + struct.success.add(_elem1387); } iprot.readListEnd(); } @@ -157393,9 +157854,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1349 : struct.success) + for (Role _iter1389 : struct.success) { - _iter1349.write(oprot); + _iter1389.write(oprot); } oprot.writeListEnd(); } @@ -157434,9 +157895,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1350 : struct.success) + for (Role _iter1390 : struct.success) { - _iter1350.write(oprot); + _iter1390.write(oprot); } } } @@ -157451,14 +157912,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1351.size); - Role _elem1352; - for (int _i1353 = 0; _i1353 < _list1351.size; ++_i1353) + org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1391.size); + Role _elem1392; + for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) { - _elem1352 = new Role(); - _elem1352.read(iprot); - struct.success.add(_elem1352); + _elem1392 = new Role(); + _elem1392.read(iprot); + struct.success.add(_elem1392); } } struct.setSuccessIsSet(true); @@ -160463,13 +160924,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1354.size); - String _elem1355; - for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356) + org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1394.size); + String _elem1395; + for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) { - _elem1355 = iprot.readString(); - struct.group_names.add(_elem1355); + _elem1395 = iprot.readString(); + struct.group_names.add(_elem1395); } iprot.readListEnd(); } @@ -160505,9 +160966,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1357 : struct.group_names) + for (String _iter1397 : struct.group_names) { - oprot.writeString(_iter1357); + oprot.writeString(_iter1397); } oprot.writeListEnd(); } @@ -160550,9 
+161011,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1358 : struct.group_names) + for (String _iter1398 : struct.group_names) { - oprot.writeString(_iter1358); + oprot.writeString(_iter1398); } } } @@ -160573,13 +161034,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1359.size); - String _elem1360; - for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361) + org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1399.size); + String _elem1400; + for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) { - _elem1360 = iprot.readString(); - struct.group_names.add(_elem1360); + _elem1400 = iprot.readString(); + struct.group_names.add(_elem1400); } } struct.setGroup_namesIsSet(true); @@ -162037,14 +162498,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin(); - struct.success = new ArrayList(_list1362.size); - HiveObjectPrivilege _elem1363; - for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364) + org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); + struct.success = new ArrayList(_list1402.size); + HiveObjectPrivilege _elem1403; + for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) { - _elem1363 = new HiveObjectPrivilege(); - _elem1363.read(iprot); - struct.success.add(_elem1363); + _elem1403 = new HiveObjectPrivilege(); + _elem1403.read(iprot); + struct.success.add(_elem1403); } iprot.readListEnd(); } @@ -162079,9 +162540,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1365 : struct.success) + for (HiveObjectPrivilege _iter1405 : struct.success) { - _iter1365.write(oprot); + _iter1405.write(oprot); } oprot.writeListEnd(); } @@ -162120,9 +162581,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1366 : struct.success) + for (HiveObjectPrivilege _iter1406 : struct.success) { - _iter1366.write(oprot); + _iter1406.write(oprot); } } } @@ -162137,14 +162598,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1367.size); - HiveObjectPrivilege _elem1368; - for (int _i1369 = 0; _i1369 < _list1367.size; ++_i1369) + org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1407.size); + HiveObjectPrivilege _elem1408; + for (int 
_i1409 = 0; _i1409 < _list1407.size; ++_i1409) { - _elem1368 = new HiveObjectPrivilege(); - _elem1368.read(iprot); - struct.success.add(_elem1368); + _elem1408 = new HiveObjectPrivilege(); + _elem1408.read(iprot); + struct.success.add(_elem1408); } } struct.setSuccessIsSet(true); @@ -165046,13 +165507,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1370.size); - String _elem1371; - for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) + org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1410.size); + String _elem1411; + for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) { - _elem1371 = iprot.readString(); - struct.group_names.add(_elem1371); + _elem1411 = iprot.readString(); + struct.group_names.add(_elem1411); } iprot.readListEnd(); } @@ -165083,9 +165544,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1373 : struct.group_names) + for (String _iter1413 : struct.group_names) { - oprot.writeString(_iter1373); + oprot.writeString(_iter1413); } oprot.writeListEnd(); } @@ -165122,9 +165583,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1374 : struct.group_names) + for (String _iter1414 : struct.group_names) { - oprot.writeString(_iter1374); + oprot.writeString(_iter1414); } } } @@ -165140,13 +165601,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1375.size); - String _elem1376; - for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) + org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1415.size); + String _elem1416; + for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) { - _elem1376 = iprot.readString(); - struct.group_names.add(_elem1376); + _elem1416 = iprot.readString(); + struct.group_names.add(_elem1416); } } struct.setGroup_namesIsSet(true); @@ -165549,13 +166010,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); - struct.success = new ArrayList(_list1378.size); - String _elem1379; - for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) + org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); + struct.success = new ArrayList(_list1418.size); + String _elem1419; + for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) { - _elem1379 = iprot.readString(); - struct.success.add(_elem1379); + _elem1419 = iprot.readString(); + struct.success.add(_elem1419); } iprot.readListEnd(); } @@ -165590,9 +166051,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1381 : struct.success) + for (String _iter1421 : struct.success) { - oprot.writeString(_iter1381); + oprot.writeString(_iter1421); } oprot.writeListEnd(); } @@ -165631,9 +166092,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1382 : struct.success) + for (String _iter1422 : struct.success) { - oprot.writeString(_iter1382); + oprot.writeString(_iter1422); } } } @@ -165648,13 +166109,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1383.size); - String _elem1384; - for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) + org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1423.size); + String _elem1424; + for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) { - _elem1384 = iprot.readString(); - struct.success.add(_elem1384); + _elem1424 = iprot.readString(); + struct.success.add(_elem1424); } } struct.setSuccessIsSet(true); @@ -170945,13 +171406,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); - struct.success = new ArrayList(_list1386.size); - String _elem1387; - for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) + org.apache.thrift.protocol.TList _list1426 = iprot.readListBegin(); + struct.success = new ArrayList(_list1426.size); + String _elem1427; + for (int _i1428 = 0; _i1428 < _list1426.size; ++_i1428) { - _elem1387 = iprot.readString(); - struct.success.add(_elem1387); + _elem1427 = iprot.readString(); + struct.success.add(_elem1427); } iprot.readListEnd(); } @@ -170977,9 +171438,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1389 : struct.success) + for (String _iter1429 : struct.success) { - oprot.writeString(_iter1389); + oprot.writeString(_iter1429); } oprot.writeListEnd(); } @@ -171010,9 +171471,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1390 : struct.success) + for (String _iter1430 : struct.success) { - oprot.writeString(_iter1390); + oprot.writeString(_iter1430); } } } @@ -171024,13 +171485,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1391.size); - String _elem1392; 
- for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) + org.apache.thrift.protocol.TList _list1431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1431.size); + String _elem1432; + for (int _i1433 = 0; _i1433 < _list1431.size; ++_i1433) { - _elem1392 = iprot.readString(); - struct.success.add(_elem1392); + _elem1432 = iprot.readString(); + struct.success.add(_elem1432); } } struct.setSuccessIsSet(true); @@ -173385,11 +173846,1879 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("remove_master_key_result("); + StringBuilder sb = new StringBuilder("remove_master_key_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class remove_master_key_resultStandardSchemeFactory implements SchemeFactory { + public remove_master_key_resultStandardScheme getScheme() { + return new remove_master_key_resultStandardScheme(); + } + } + + private static class remove_master_key_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class remove_master_key_resultTupleSchemeFactory implements SchemeFactory { + public remove_master_key_resultTupleScheme getScheme() { + return new 
remove_master_key_resultTupleScheme(); + } + } + + private static class remove_master_key_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_master_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_master_keys_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_args.class, metaDataMap); + } + + public get_master_keys_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_master_keys_args(get_master_keys_args other) { + } + + public get_master_keys_args deepCopy() { + return new get_master_keys_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_master_keys_args) + return this.equals((get_master_keys_args)that); + return false; + } + + public boolean equals(get_master_keys_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_master_keys_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_master_keys_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_master_keys_argsStandardSchemeFactory implements SchemeFactory { + public get_master_keys_argsStandardScheme getScheme() { + return new get_master_keys_argsStandardScheme(); + } + } + + private static class get_master_keys_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + 
+ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_master_keys_argsTupleSchemeFactory implements SchemeFactory { + public get_master_keys_argsTupleScheme getScheme() { + return new get_master_keys_argsTupleScheme(); + } + } + + private static class get_master_keys_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_master_keys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_master_keys_resultTupleSchemeFactory()); + } + + private List success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_result.class, metaDataMap); + } + + public get_master_keys_result() { + } + + public get_master_keys_result( + List success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public get_master_keys_result(get_master_keys_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success); + this.success = __this__success; + } + } + + public get_master_keys_result deepCopy() { + return new get_master_keys_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_master_keys_result) + return this.equals((get_master_keys_result)that); + return false; + } + + public boolean equals(get_master_keys_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_master_keys_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_master_keys_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check 
for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_master_keys_resultStandardSchemeFactory implements SchemeFactory { + public get_master_keys_resultStandardScheme getScheme() { + return new get_master_keys_resultStandardScheme(); + } + } + + private static class get_master_keys_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1434 = iprot.readListBegin(); + struct.success = new ArrayList(_list1434.size); + String _elem1435; + for (int _i1436 = 0; _i1436 < _list1434.size; ++_i1436) + { + _elem1435 = iprot.readString(); + struct.success.add(_elem1435); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1437 : struct.success) + { + oprot.writeString(_iter1437); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_master_keys_resultTupleSchemeFactory implements SchemeFactory { + public get_master_keys_resultTupleScheme getScheme() { + return new get_master_keys_resultTupleScheme(); + } + } + + private static class get_master_keys_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (String _iter1438 : struct.success) + { + oprot.writeString(_iter1438); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws 
org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1439.size); + String _elem1440; + for (int _i1441 = 0; _i1441 < _list1439.size; ++_i1441) + { + _elem1440 = iprot.readString(); + struct.success.add(_elem1440); + } + } + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_open_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_args.class, metaDataMap); + } + + public get_open_txns_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_open_txns_args(get_open_txns_args other) { + } + + public get_open_txns_args deepCopy() { + return new get_open_txns_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_args) + return this.equals((get_open_txns_args)that); + return false; + } + + public boolean equals(get_open_txns_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_open_txns_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_open_txns_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_open_txns_argsStandardSchemeFactory implements SchemeFactory { + public get_open_txns_argsStandardScheme getScheme() { + return new get_open_txns_argsStandardScheme(); + } + } + + private static class get_open_txns_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_open_txns_argsTupleSchemeFactory implements SchemeFactory { + public get_open_txns_argsTupleScheme getScheme() { + return new get_open_txns_argsTupleScheme(); + } + } + + private static class get_open_txns_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_open_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_resultTupleSchemeFactory()); + } + + private GetOpenTxnsResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_result.class, metaDataMap); + } + + public get_open_txns_result() { + } + + public get_open_txns_result( + GetOpenTxnsResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public get_open_txns_result(get_open_txns_result other) { + if (other.isSetSuccess()) { + this.success = new GetOpenTxnsResponse(other.success); + } + } + + public get_open_txns_result deepCopy() { + return new get_open_txns_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public GetOpenTxnsResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetOpenTxnsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetOpenTxnsResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_result) + return this.equals((get_open_txns_result)that); + return false; + } + + public boolean equals(get_open_txns_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if 
(present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_open_txns_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_open_txns_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_open_txns_resultStandardSchemeFactory implements SchemeFactory { + public get_open_txns_resultStandardScheme getScheme() { + return new get_open_txns_resultStandardScheme(); + } + } + + private static class get_open_txns_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenTxnsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_open_txns_resultTupleSchemeFactory implements SchemeFactory { + public get_open_txns_resultTupleScheme getScheme() { + return new get_open_txns_resultTupleScheme(); + } + } + + private static class get_open_txns_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new GetOpenTxnsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_open_txns_info_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_info_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_args.class, metaDataMap); + } + + public get_open_txns_info_args() { + } + + /** + * Performs a deep copy on other. + */ + public get_open_txns_info_args(get_open_txns_info_args other) { + } + + public get_open_txns_info_args deepCopy() { + return new get_open_txns_info_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_info_args) + return this.equals((get_open_txns_info_args)that); + return false; + } + + public boolean equals(get_open_txns_info_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_open_txns_info_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_open_txns_info_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_open_txns_info_argsStandardSchemeFactory implements SchemeFactory { + public get_open_txns_info_argsStandardScheme getScheme() { + return new get_open_txns_info_argsStandardScheme(); + } + } + + private static class get_open_txns_info_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_open_txns_info_argsTupleSchemeFactory implements SchemeFactory { + public get_open_txns_info_argsTupleScheme getScheme() { + return new get_open_txns_info_argsTupleScheme(); + } + } + + private static class get_open_txns_info_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_open_txns_info_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_txns_info_resultTupleSchemeFactory()); + } + + private GetOpenTxnsInfoResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsInfoResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_result.class, metaDataMap); + } + + public get_open_txns_info_result() { + } + + public get_open_txns_info_result( + GetOpenTxnsInfoResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public get_open_txns_info_result(get_open_txns_info_result other) { + if (other.isSetSuccess()) { + this.success = new GetOpenTxnsInfoResponse(other.success); + } + } + + public get_open_txns_info_result deepCopy() { + return new get_open_txns_info_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public GetOpenTxnsInfoResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetOpenTxnsInfoResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetOpenTxnsInfoResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_open_txns_info_result) + return this.equals((get_open_txns_info_result)that); + return false; + } + + public boolean equals(get_open_txns_info_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && 
that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_open_txns_info_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_open_txns_info_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; sb.append(")"); return sb.toString(); @@ -173398,6 +175727,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173410,23 +175742,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class remove_master_key_resultStandardSchemeFactory implements SchemeFactory { - public remove_master_key_resultStandardScheme getScheme() { - return new remove_master_key_resultStandardScheme(); + private static class get_open_txns_info_resultStandardSchemeFactory implements SchemeFactory { + public get_open_txns_info_resultStandardScheme getScheme() { + return new get_open_txns_info_resultStandardScheme(); } } - private static class remove_master_key_resultStandardScheme extends StandardScheme { + private static class get_open_txns_info_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173437,8 +175767,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_r } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenTxnsInfoResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -173453,13 +175784,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, remove_master_key_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -173468,16 +175799,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, remove_master_key_ } - private static class remove_master_key_resultTupleSchemeFactory implements SchemeFactory { - public remove_master_key_resultTupleScheme getScheme() { - return new remove_master_key_resultTupleScheme(); + private static class get_open_txns_info_resultTupleSchemeFactory implements SchemeFactory { + public get_open_txns_info_resultTupleScheme getScheme() { + return new get_open_txns_info_resultTupleScheme(); } } - private static class remove_master_key_resultTupleScheme extends TupleScheme { + private static class get_open_txns_info_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -173485,16 +175816,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
remove_master_key_r } oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = iprot.readBool(); + struct.success = new GetOpenTxnsInfoResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } } @@ -173502,20 +175834,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_re } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_args"); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_master_keys_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_master_keys_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new open_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new open_txns_argsTupleSchemeFactory()); } + private OpenTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -173530,6 +175864,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, remove_master_key_re */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // RQST + return RQST; default: return null; } @@ -173568,37 +175904,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_args.class, metaDataMap); } - public get_master_keys_args() { + public open_txns_args() { + } + + public open_txns_args( + OpenTxnRequest rqst) + { + this(); + this.rqst = rqst; } /** * Performs a deep copy on other. */ - public get_master_keys_args(get_master_keys_args other) { + public open_txns_args(open_txns_args other) { + if (other.isSetRqst()) { + this.rqst = new OpenTxnRequest(other.rqst); + } } - public get_master_keys_args deepCopy() { - return new get_master_keys_args(this); + public open_txns_args deepCopy() { + return new open_txns_args(this); } @Override public void clear() { + this.rqst = null; + } + + public OpenTxnRequest getRqst() { + return this.rqst; + } + + public void setRqst(OpenTxnRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((OpenTxnRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case RQST: + return getRqst(); + } throw new IllegalStateException(); } @@ -173610,6 +175995,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -173618,15 +176005,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_master_keys_args) - return this.equals((get_master_keys_args)that); + if (that instanceof open_txns_args) + return this.equals((open_txns_args)that); return false; } - public boolean equals(get_master_keys_args that) { + public boolean equals(open_txns_args that) { if (that == null) return false; + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + return true; } @@ -173634,17 +176030,32 @@ public boolean 
equals(get_master_keys_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + return list.hashCode(); } @Override - public int compareTo(get_master_keys_args other) { + public int compareTo(open_txns_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -173662,9 +176073,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_master_keys_args("); + StringBuilder sb = new StringBuilder("open_txns_args("); boolean first = true; + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; sb.append(")"); return sb.toString(); } @@ -173672,6 +176090,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173690,15 +176111,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_master_keys_argsStandardSchemeFactory implements SchemeFactory { - public get_master_keys_argsStandardScheme getScheme() { - return new get_master_keys_argsStandardScheme(); + private static class open_txns_argsStandardSchemeFactory implements SchemeFactory { + public open_txns_argsStandardScheme getScheme() { + return new open_txns_argsStandardScheme(); } } - private static class get_master_keys_argsStandardScheme extends StandardScheme { + private static class open_txns_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173708,6 +176129,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_arg break; } switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new OpenTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -173717,49 +176147,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + 
oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_master_keys_argsTupleSchemeFactory implements SchemeFactory { - public get_master_keys_argsTupleScheme getScheme() { - return new get_master_keys_argsTupleScheme(); + private static class open_txns_argsTupleSchemeFactory implements SchemeFactory { + public open_txns_argsTupleScheme getScheme() { + return new open_txns_argsTupleScheme(); } } - private static class get_master_keys_argsTupleScheme extends TupleScheme { + private static class open_txns_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new OpenTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_master_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_master_keys_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_master_keys_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_master_keys_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new open_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new open_txns_resultTupleSchemeFactory()); } - private List success; // required + private OpenTxnsResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173824,17 +176273,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnsResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_master_keys_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_result.class, metaDataMap); } - public get_master_keys_result() { + public open_txns_result() { } - public get_master_keys_result( - List success) + public open_txns_result( + OpenTxnsResponse success) { this(); this.success = success; @@ -173843,15 +176291,14 @@ public get_master_keys_result( /** * Performs a deep copy on other. */ - public get_master_keys_result(get_master_keys_result other) { + public open_txns_result(open_txns_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); - this.success = __this__success; + this.success = new OpenTxnsResponse(other.success); } } - public get_master_keys_result deepCopy() { - return new get_master_keys_result(this); + public open_txns_result deepCopy() { + return new open_txns_result(this); } @Override @@ -173859,26 +176306,11 @@ public void clear() { this.success = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? null : this.success.iterator(); - } - - public void addToSuccess(String elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { + public OpenTxnsResponse getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(OpenTxnsResponse success) { this.success = success; } @@ -173903,7 +176335,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((OpenTxnsResponse)value); } break; @@ -173936,12 +176368,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_master_keys_result) - return this.equals((get_master_keys_result)that); + if (that instanceof open_txns_result) + return this.equals((open_txns_result)that); return false; } - public boolean equals(get_master_keys_result that) { + public boolean equals(open_txns_result that) { if (that == null) return false; @@ -173970,7 +176402,7 @@ public int hashCode() { } @Override - public int compareTo(get_master_keys_result other) { + public int compareTo(open_txns_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174004,7 +176436,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_master_keys_result("); + StringBuilder sb = new StringBuilder("open_txns_result("); boolean first = true; sb.append("success:"); @@ -174021,6 +176453,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174039,15 +176474,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_master_keys_resultStandardSchemeFactory implements SchemeFactory { - public get_master_keys_resultStandardScheme getScheme() { - return new get_master_keys_resultStandardScheme(); + private static class open_txns_resultStandardSchemeFactory implements SchemeFactory { + public open_txns_resultStandardScheme getScheme() { + return new open_txns_resultStandardScheme(); } } - private static class get_master_keys_resultStandardScheme extends StandardScheme { + private static class open_txns_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174058,18 +176493,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); - struct.success = new ArrayList(_list1394.size); - String _elem1395; - for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) - { - _elem1395 = iprot.readString(); - struct.success.add(_elem1395); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new OpenTxnsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -174084,20 +176510,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1397 : struct.success) - { - oprot.writeString(_iter1397); - } - oprot.writeListEnd(); - } + struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -174106,16 +176525,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re } - private static class get_master_keys_resultTupleSchemeFactory implements SchemeFactory { - public get_master_keys_resultTupleScheme getScheme() { - return new get_master_keys_resultTupleScheme(); + private static class open_txns_resultTupleSchemeFactory implements SchemeFactory 
{ + public open_txns_resultTupleScheme getScheme() { + return new open_txns_resultTupleScheme(); } } - private static class get_master_keys_resultTupleScheme extends TupleScheme { + private static class open_txns_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174123,31 +176542,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res } oprot.writeBitSet(optionals, 1); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (String _iter1398 : struct.success) - { - oprot.writeString(_iter1398); - } - } + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1399.size); - String _elem1400; - for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) - { - _elem1400 = iprot.readString(); - struct.success.add(_elem1400); - } - } + struct.success = new OpenTxnsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } } @@ -174155,20 +176560,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_args"); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txn_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txn_argsTupleSchemeFactory()); } + private AbortTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -174183,6 +176590,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // RQST + return RQST; default: return null; } @@ -174221,37 +176630,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_args.class, metaDataMap); } - public get_open_txns_args() { + public abort_txn_args() { + } + + public abort_txn_args( + AbortTxnRequest rqst) + { + this(); + this.rqst = rqst; } /** * Performs a deep copy on other. */ - public get_open_txns_args(get_open_txns_args other) { + public abort_txn_args(abort_txn_args other) { + if (other.isSetRqst()) { + this.rqst = new AbortTxnRequest(other.rqst); + } } - public get_open_txns_args deepCopy() { - return new get_open_txns_args(this); + public abort_txn_args deepCopy() { + return new abort_txn_args(this); } @Override public void clear() { + this.rqst = null; + } + + public AbortTxnRequest getRqst() { + return this.rqst; + } + + public void setRqst(AbortTxnRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((AbortTxnRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case RQST: + return getRqst(); + } throw new IllegalStateException(); } @@ -174263,6 +176721,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -174271,15 +176731,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_args) - return this.equals((get_open_txns_args)that); + if (that instanceof abort_txn_args) + return this.equals((abort_txn_args)that); return false; } - public boolean equals(get_open_txns_args that) { + public boolean equals(abort_txn_args that) { if (that == null) return false; + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + return true; } @@ -174287,17 +176756,32 @@ public boolean 
equals(get_open_txns_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + return list.hashCode(); } @Override - public int compareTo(get_open_txns_args other) { + public int compareTo(abort_txn_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -174315,9 +176799,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_args("); + StringBuilder sb = new StringBuilder("abort_txn_args("); boolean first = true; + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; sb.append(")"); return sb.toString(); } @@ -174325,6 +176816,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174343,15 +176837,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_argsStandardSchemeFactory implements SchemeFactory { - public get_open_txns_argsStandardScheme getScheme() { - return new get_open_txns_argsStandardScheme(); + private static class abort_txn_argsStandardSchemeFactory implements SchemeFactory { + public abort_txn_argsStandardScheme getScheme() { + return new abort_txn_argsStandardScheme(); } } - private static class get_open_txns_argsStandardScheme extends StandardScheme { + private static class abort_txn_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174361,6 +176855,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args break; } switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new AbortTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -174370,53 +176873,72 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); 
+ struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_open_txns_argsTupleSchemeFactory implements SchemeFactory { - public get_open_txns_argsTupleScheme getScheme() { - return new get_open_txns_argsTupleScheme(); + private static class abort_txn_argsTupleSchemeFactory implements SchemeFactory { + public abort_txn_argsTupleScheme getScheme() { + return new abort_txn_argsTupleScheme(); } } - private static class get_open_txns_argsTupleScheme extends TupleScheme { + private static class abort_txn_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new AbortTxnRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txn_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txn_resultTupleSchemeFactory()); } - private GetOpenTxnsResponse success; // required + private NoSuchTxnException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -174431,8 +176953,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_args s */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -174476,70 +176998,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_result.class, metaDataMap); } - public get_open_txns_result() { + public abort_txn_result() { } - public get_open_txns_result( - GetOpenTxnsResponse success) + public abort_txn_result( + NoSuchTxnException o1) { this(); - this.success = success; + this.o1 = o1; } /** * Performs a deep copy on other. */ - public get_open_txns_result(get_open_txns_result other) { - if (other.isSetSuccess()) { - this.success = new GetOpenTxnsResponse(other.success); + public abort_txn_result(abort_txn_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); } } - public get_open_txns_result deepCopy() { - return new get_open_txns_result(this); + public abort_txn_result deepCopy() { + return new abort_txn_result(this); } @Override public void clear() { - this.success = null; + this.o1 = null; } - public GetOpenTxnsResponse getSuccess() { - return this.success; + public NoSuchTxnException getO1() { + return this.o1; } - public void setSuccess(GetOpenTxnsResponse success) { - this.success = success; + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.success = null; + public void unsetO1() { + this.o1 = null; } - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; } - public void setSuccessIsSet(boolean value) { + public void setO1IsSet(boolean value) { if (!value) { - this.success = null; + this.o1 = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((GetOpenTxnsResponse)value); + setO1((NoSuchTxnException)value); } break; @@ -174548,8 +177070,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); + case O1: + return 
getO1(); } throw new IllegalStateException(); @@ -174562,8 +177084,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); + case O1: + return isSetO1(); } throw new IllegalStateException(); } @@ -174572,21 +177094,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_result) - return this.equals((get_open_txns_result)that); + if (that instanceof abort_txn_result) + return this.equals((abort_txn_result)that); return false; } - public boolean equals(get_open_txns_result that) { + public boolean equals(abort_txn_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (!this.success.equals(that.success)) + if (!this.o1.equals(that.o1)) return false; } @@ -174597,28 +177119,28 @@ public boolean equals(get_open_txns_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); return list.hashCode(); } @Override - public int compareTo(get_open_txns_result other) { + public int compareTo(abort_txn_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); if (lastComparison != 0) { return lastComparison; } @@ -174640,14 +177162,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_result("); + StringBuilder sb = new StringBuilder("abort_txn_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); } first = false; sb.append(")"); @@ -174657,9 +177179,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174678,15 +177197,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_resultStandardSchemeFactory implements SchemeFactory { - public get_open_txns_resultStandardScheme getScheme() { - return new get_open_txns_resultStandardScheme(); + private static class abort_txn_resultStandardSchemeFactory implements SchemeFactory { + public abort_txn_resultStandardScheme getScheme() { + return new abort_txn_resultStandardScheme(); } } - private static class get_open_txns_resultStandardScheme extends StandardScheme { + private static class abort_txn_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174696,11 +177215,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_resul break; } switch (schemeField.id) { - case 0: // SUCCESS + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetOpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -174714,13 +177233,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_resul struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -174729,55 +177248,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_resu } - private static class get_open_txns_resultTupleSchemeFactory implements SchemeFactory { - public get_open_txns_resultTupleScheme getScheme() { - return new get_open_txns_resultTupleScheme(); + private static class abort_txn_resultTupleSchemeFactory implements SchemeFactory { + public abort_txn_resultTupleScheme getScheme() { + return new abort_txn_resultTupleScheme(); } } - private static class get_open_txns_resultTupleScheme extends TupleScheme { + private static class 
abort_txn_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); + if (struct.isSetO1()) { + struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetOpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_args"); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_info_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_info_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txns_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txns_argsTupleSchemeFactory()); } + private AbortTxnsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -174792,6 +177313,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_result */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // RQST + return RQST; default: return null; } @@ -174830,37 +177353,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_args.class, metaDataMap); } - public get_open_txns_info_args() { + public abort_txns_args() { + } + + public abort_txns_args( + AbortTxnsRequest rqst) + { + this(); + this.rqst = rqst; } /** * Performs a deep copy on other. */ - public get_open_txns_info_args(get_open_txns_info_args other) { + public abort_txns_args(abort_txns_args other) { + if (other.isSetRqst()) { + this.rqst = new AbortTxnsRequest(other.rqst); + } } - public get_open_txns_info_args deepCopy() { - return new get_open_txns_info_args(this); + public abort_txns_args deepCopy() { + return new abort_txns_args(this); } @Override public void clear() { + this.rqst = null; + } + + public AbortTxnsRequest getRqst() { + return this.rqst; + } + + public void setRqst(AbortTxnsRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((AbortTxnsRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case RQST: + return getRqst(); + } throw new IllegalStateException(); } @@ -174872,6 +177444,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -174880,15 +177454,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_info_args) - return this.equals((get_open_txns_info_args)that); + if (that instanceof abort_txns_args) + return this.equals((abort_txns_args)that); return false; } - public boolean equals(get_open_txns_info_args that) { + public boolean equals(abort_txns_args that) { if (that == null) return false; + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + return true; } 
@@ -174896,17 +177479,32 @@ public boolean equals(get_open_txns_info_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + return list.hashCode(); } @Override - public int compareTo(get_open_txns_info_args other) { + public int compareTo(abort_txns_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -174924,9 +177522,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_info_args("); + StringBuilder sb = new StringBuilder("abort_txns_args("); boolean first = true; + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; sb.append(")"); return sb.toString(); } @@ -174934,6 +177539,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -174952,15 +177560,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_info_argsStandardSchemeFactory implements SchemeFactory { - public get_open_txns_info_argsStandardScheme getScheme() { - return new get_open_txns_info_argsStandardScheme(); + private static class abort_txns_argsStandardSchemeFactory implements SchemeFactory { + public abort_txns_argsStandardScheme getScheme() { + return new abort_txns_argsStandardScheme(); } } - private static class get_open_txns_info_argsStandardScheme extends StandardScheme { + private static class abort_txns_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174970,6 +177578,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ break; } switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new AbortTxnsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -174979,53 +177596,72 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_open_txns_info_argsTupleSchemeFactory implements SchemeFactory { - public get_open_txns_info_argsTupleScheme getScheme() { - return new get_open_txns_info_argsTupleScheme(); + private static class abort_txns_argsTupleSchemeFactory implements SchemeFactory { + public abort_txns_argsTupleScheme getScheme() { + return new abort_txns_argsTupleScheme(); } } - private static class get_open_txns_info_argsTupleScheme extends TupleScheme { + private static class abort_txns_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new AbortTxnsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_txns_info_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_txns_info_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_open_txns_info_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_open_txns_info_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new abort_txns_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new abort_txns_resultTupleSchemeFactory()); } - private GetOpenTxnsInfoResponse success; // required + private NoSuchTxnException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); + O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -175040,8 +177676,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_a */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -175085,70 +177721,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenTxnsInfoResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_txns_info_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_result.class, metaDataMap); } - public get_open_txns_info_result() { + public abort_txns_result() { } - public get_open_txns_info_result( - GetOpenTxnsInfoResponse success) + public abort_txns_result( + NoSuchTxnException o1) { this(); - this.success = success; + this.o1 = o1; } /** * Performs a deep copy on other. 
*/ - public get_open_txns_info_result(get_open_txns_info_result other) { - if (other.isSetSuccess()) { - this.success = new GetOpenTxnsInfoResponse(other.success); + public abort_txns_result(abort_txns_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); } } - public get_open_txns_info_result deepCopy() { - return new get_open_txns_info_result(this); + public abort_txns_result deepCopy() { + return new abort_txns_result(this); } @Override public void clear() { - this.success = null; + this.o1 = null; } - public GetOpenTxnsInfoResponse getSuccess() { - return this.success; + public NoSuchTxnException getO1() { + return this.o1; } - public void setSuccess(GetOpenTxnsInfoResponse success) { - this.success = success; + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.success = null; + public void unsetO1() { + this.o1 = null; } - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; } - public void setSuccessIsSet(boolean value) { + public void setO1IsSet(boolean value) { if (!value) { - this.success = null; + this.o1 = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((GetOpenTxnsInfoResponse)value); + setO1((NoSuchTxnException)value); } break; @@ -175157,8 +177793,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); + case O1: + return getO1(); } throw new IllegalStateException(); @@ -175171,8 +177807,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); + case O1: + return isSetO1(); } throw new IllegalStateException(); } @@ -175181,21 +177817,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_open_txns_info_result) - return this.equals((get_open_txns_info_result)that); + if (that instanceof abort_txns_result) + return this.equals((abort_txns_result)that); return false; } - public boolean equals(get_open_txns_info_result that) { + public boolean equals(abort_txns_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) return false; - if (!this.success.equals(that.success)) + if (!this.o1.equals(that.o1)) return false; } @@ -175206,28 +177842,28 @@ public boolean equals(get_open_txns_info_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); return list.hashCode(); } @Override - public int compareTo(get_open_txns_info_result other) { + public 
int compareTo(abort_txns_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); if (lastComparison != 0) { return lastComparison; } @@ -175249,14 +177885,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_open_txns_info_result("); + StringBuilder sb = new StringBuilder("abort_txns_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); } first = false; sb.append(")"); @@ -175266,9 +177902,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -175287,15 +177920,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_open_txns_info_resultStandardSchemeFactory implements SchemeFactory { - public get_open_txns_info_resultStandardScheme getScheme() { - return new get_open_txns_info_resultStandardScheme(); + private static class abort_txns_resultStandardSchemeFactory implements SchemeFactory { + public abort_txns_resultStandardScheme getScheme() { + return new abort_txns_resultStandardScheme(); } } - private static class get_open_txns_info_resultStandardScheme extends StandardScheme { + private static class abort_txns_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175305,11 +177938,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ break; } switch (schemeField.id) { - case 0: // SUCCESS + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetOpenTxnsInfoResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -175323,13 +177956,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_txns_info_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -175338,53 +177971,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_txns_info } - private static class get_open_txns_info_resultTupleSchemeFactory implements SchemeFactory { - public get_open_txns_info_resultTupleScheme getScheme() { - return new get_open_txns_info_resultTupleScheme(); + private static class abort_txns_resultTupleSchemeFactory implements SchemeFactory { + public abort_txns_resultTupleScheme getScheme() { + return new abort_txns_resultTupleScheme(); } } - private static class get_open_txns_info_resultTupleScheme extends TupleScheme { + private static class abort_txns_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); + if (struct.isSetO1()) { + struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_open_txns_info_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetOpenTxnsInfoResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new open_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new open_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new commit_txn_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new commit_txn_argsTupleSchemeFactory()); } - private OpenTxnRequest rqst; // required + private CommitTxnRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175449,16 +178082,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_args.class, metaDataMap); } - public open_txns_args() { + public commit_txn_args() { } - public open_txns_args( - OpenTxnRequest rqst) + public commit_txn_args( + CommitTxnRequest rqst) { this(); this.rqst = rqst; @@ -175467,14 +178100,14 @@ public open_txns_args( /** * Performs a deep copy on other. */ - public open_txns_args(open_txns_args other) { + public commit_txn_args(commit_txn_args other) { if (other.isSetRqst()) { - this.rqst = new OpenTxnRequest(other.rqst); + this.rqst = new CommitTxnRequest(other.rqst); } } - public open_txns_args deepCopy() { - return new open_txns_args(this); + public commit_txn_args deepCopy() { + return new commit_txn_args(this); } @Override @@ -175482,11 +178115,11 @@ public void clear() { this.rqst = null; } - public OpenTxnRequest getRqst() { + public CommitTxnRequest getRqst() { return this.rqst; } - public void setRqst(OpenTxnRequest rqst) { + public void setRqst(CommitTxnRequest rqst) { this.rqst = rqst; } @@ -175511,7 +178144,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((OpenTxnRequest)value); + setRqst((CommitTxnRequest)value); } break; @@ -175544,12 +178177,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof open_txns_args) - return this.equals((open_txns_args)that); + if (that instanceof commit_txn_args) + return this.equals((commit_txn_args)that); return false; } - public boolean equals(open_txns_args that) { + public boolean equals(commit_txn_args that) { if (that == null) return false; @@ -175578,7 +178211,7 @@ public int hashCode() { } @Override - public int compareTo(open_txns_args other) { + public int compareTo(commit_txn_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175612,7 +178245,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("open_txns_args("); + StringBuilder sb = new StringBuilder("commit_txn_args("); boolean first = true; sb.append("rqst:"); @@ -175650,15 +178283,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class open_txns_argsStandardSchemeFactory implements SchemeFactory { - public open_txns_argsStandardScheme getScheme() { - return new open_txns_argsStandardScheme(); + private static class commit_txn_argsStandardSchemeFactory implements SchemeFactory { + public commit_txn_argsStandardScheme getScheme() { + return new commit_txn_argsStandardScheme(); } } - private static class open_txns_argsStandardScheme extends StandardScheme { + private static class commit_txn_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175670,7 +178303,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args stru switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new OpenTxnRequest(); + struct.rqst = new CommitTxnRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -175686,7 +178319,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_args stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175701,16 +178334,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_args str } - private static class open_txns_argsTupleSchemeFactory implements SchemeFactory { - public open_txns_argsTupleScheme getScheme() { - return new open_txns_argsTupleScheme(); + private static class commit_txn_argsTupleSchemeFactory implements SchemeFactory { + public commit_txn_argsTupleScheme getScheme() { + return new commit_txn_argsTupleScheme(); } } - private static class open_txns_argsTupleScheme extends TupleScheme { + private static class commit_txn_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -175723,11 +178356,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_args stru } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new OpenTxnRequest(); + struct.rqst = new CommitTxnRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ 
-175736,22 +178369,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struc
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class open_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("open_txns_result");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_result");

-    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2);

     private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new open_txns_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new open_txns_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new commit_txn_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new commit_txn_resultTupleSchemeFactory());
     }

-    private OpenTxnsResponse success; // required
+    private NoSuchTxnException o1; // required
+    private TxnAbortedException o2; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      SUCCESS((short)0, "success");
+      O1((short)1, "o1"),
+      O2((short)2, "o2");

       private static final Map byName = new HashMap();

@@ -175766,8 +178402,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_args struc
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 0: // SUCCESS
-            return SUCCESS;
+          case 1: // O1
+            return O1;
+          case 2: // O2
+            return O2;
           default:
             return null;
         }
@@ -175811,70 +178449,109 @@ public String getFieldName() {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, OpenTxnsResponse.class)));
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(open_txns_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_result.class, metaDataMap);
     }

-    public open_txns_result() {
+    public commit_txn_result() {
     }

-    public open_txns_result(
-      OpenTxnsResponse success)
+    public commit_txn_result(
+      NoSuchTxnException o1,
+      TxnAbortedException o2)
     {
       this();
-      this.success = success;
+      this.o1 = o1;
+      this.o2 = o2;
     }

     /**
      * Performs a deep copy on other.
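// A minimal illustrative sketch, not part of this hunk: commit_txn(...) is void, so the
// commit_txn_result struct above carries only the two declared exceptions o1 and o2.
// The standard Thrift-generated receive path (assumed here; the Client class is not part
// of the hunks shown) re-throws whichever exception field came back set:
public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException
{
  send_commit_txn(rqst);
  recv_commit_txn();
}

public void recv_commit_txn() throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException
{
  commit_txn_result result = new commit_txn_result();
  receiveBase(result, "commit_txn");
  if (result.o1 != null) {
    throw result.o1;   // NoSuchTxnException
  }
  if (result.o2 != null) {
    throw result.o2;   // TxnAbortedException
  }
  return;
}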
*/ - public open_txns_result(open_txns_result other) { - if (other.isSetSuccess()) { - this.success = new OpenTxnsResponse(other.success); + public commit_txn_result(commit_txn_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new TxnAbortedException(other.o2); } } - public open_txns_result deepCopy() { - return new open_txns_result(this); + public commit_txn_result deepCopy() { + return new commit_txn_result(this); } @Override public void clear() { - this.success = null; + this.o1 = null; + this.o2 = null; } - public OpenTxnsResponse getSuccess() { - return this.success; + public NoSuchTxnException getO1() { + return this.o1; } - public void setSuccess(OpenTxnsResponse success) { - this.success = success; + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; } - public void unsetSuccess() { - this.success = null; + public void unsetO1() { + this.o1 = null; } - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; } - public void setSuccessIsSet(boolean value) { + public void setO1IsSet(boolean value) { if (!value) { - this.success = null; + this.o1 = null; + } + } + + public TxnAbortedException getO2() { + return this.o2; + } + + public void setO2(TxnAbortedException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: + case O1: if (value == null) { - unsetSuccess(); + unsetO1(); } else { - setSuccess((OpenTxnsResponse)value); + setO1((NoSuchTxnException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((TxnAbortedException)value); } break; @@ -175883,8 +178560,11 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); + case O1: + return getO1(); + + case O2: + return getO2(); } throw new IllegalStateException(); @@ -175897,8 +178577,10 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); } throw new IllegalStateException(); } @@ -175907,21 +178589,30 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof open_txns_result) - return this.equals((open_txns_result)that); + if (that instanceof commit_txn_result) + return this.equals((commit_txn_result)that); return false; } - public boolean equals(open_txns_result that) { + public boolean equals(commit_txn_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && 
that_present_o1)) return false; - if (!this.success.equals(that.success)) + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) return false; } @@ -175932,28 +178623,43 @@ public boolean equals(open_txns_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); return list.hashCode(); } @Override - public int compareTo(open_txns_result other) { + public int compareTo(commit_txn_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); if (lastComparison != 0) { return lastComparison; } @@ -175975,14 +178681,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("open_txns_result("); + StringBuilder sb = new StringBuilder("commit_txn_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { + sb.append("o1:"); + if (this.o1 == null) { sb.append("null"); } else { - sb.append(this.success); + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); } first = false; sb.append(")"); @@ -175992,9 +178706,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -176013,15 +178724,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class open_txns_resultStandardSchemeFactory implements SchemeFactory { - public open_txns_resultStandardScheme getScheme() { - return new open_txns_resultStandardScheme(); + private static class commit_txn_resultStandardSchemeFactory implements SchemeFactory { + public commit_txn_resultStandardScheme getScheme() { + return new commit_txn_resultStandardScheme(); } } - private static class open_txns_resultStandardScheme extends StandardScheme { + private static class commit_txn_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176031,11 +178742,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result st break; } switch (schemeField.id) { - case 0: // SUCCESS + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new OpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -176049,13 +178769,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, open_txns_result st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -176064,53 +178789,64 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, open_txns_result s } - private static class open_txns_resultTupleSchemeFactory implements SchemeFactory { - public open_txns_resultTupleScheme getScheme() { - return new open_txns_resultTupleScheme(); + private static class commit_txn_resultTupleSchemeFactory implements SchemeFactory { + public commit_txn_resultTupleScheme getScheme() { + return new commit_txn_resultTupleScheme(); } } - private static class open_txns_resultTupleScheme extends TupleScheme { + private static class commit_txn_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, open_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = new OpenTxnsResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_write_ids_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txn_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txn_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_open_write_ids_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_open_write_ids_argsTupleSchemeFactory()); } - private AbortTxnRequest rqst; // required + private GetOpenWriteIdsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176175,16 +178911,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenWriteIdsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_write_ids_args.class, metaDataMap); } - public abort_txn_args() { + public get_open_write_ids_args() { } - public abort_txn_args( - AbortTxnRequest rqst) + public get_open_write_ids_args( + GetOpenWriteIdsRequest rqst) { this(); this.rqst = rqst; @@ -176193,14 +178929,14 @@ public abort_txn_args( /** * Performs a deep copy on other. */ - public abort_txn_args(abort_txn_args other) { + public get_open_write_ids_args(get_open_write_ids_args other) { if (other.isSetRqst()) { - this.rqst = new AbortTxnRequest(other.rqst); + this.rqst = new GetOpenWriteIdsRequest(other.rqst); } } - public abort_txn_args deepCopy() { - return new abort_txn_args(this); + public get_open_write_ids_args deepCopy() { + return new get_open_write_ids_args(this); } @Override @@ -176208,11 +178944,11 @@ public void clear() { this.rqst = null; } - public AbortTxnRequest getRqst() { + public GetOpenWriteIdsRequest getRqst() { return this.rqst; } - public void setRqst(AbortTxnRequest rqst) { + public void setRqst(GetOpenWriteIdsRequest rqst) { this.rqst = rqst; } @@ -176237,7 +178973,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((AbortTxnRequest)value); + setRqst((GetOpenWriteIdsRequest)value); } break; @@ -176270,12 +179006,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txn_args) - return this.equals((abort_txn_args)that); + if (that instanceof get_open_write_ids_args) + return this.equals((get_open_write_ids_args)that); return false; } - public boolean equals(abort_txn_args that) { + public boolean equals(get_open_write_ids_args that) { if (that == null) return false; @@ -176304,7 +179040,7 @@ public int hashCode() { } @Override - public int compareTo(abort_txn_args other) { + public int compareTo(get_open_write_ids_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176338,7 +179074,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txn_args("); + StringBuilder sb = new StringBuilder("get_open_write_ids_args("); boolean first = true; sb.append("rqst:"); @@ -176376,15 +179112,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txn_argsStandardSchemeFactory implements SchemeFactory { - public abort_txn_argsStandardScheme getScheme() { - return new abort_txn_argsStandardScheme(); + private static class get_open_write_ids_argsStandardSchemeFactory implements SchemeFactory { + public get_open_write_ids_argsStandardScheme getScheme() { + return new get_open_write_ids_argsStandardScheme(); } } - private static class abort_txn_argsStandardScheme extends StandardScheme { + private static class get_open_write_ids_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_write_ids_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176396,7 +179132,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args stru switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AbortTxnRequest(); + struct.rqst = new GetOpenWriteIdsRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -176412,7 +179148,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_args stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_write_ids_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -176427,16 +179163,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_args str } - private static class abort_txn_argsTupleSchemeFactory implements SchemeFactory { - public abort_txn_argsTupleScheme getScheme() { - return new abort_txn_argsTupleScheme(); + private static class get_open_write_ids_argsTupleSchemeFactory implements SchemeFactory { + public get_open_write_ids_argsTupleScheme getScheme() { + return new get_open_write_ids_argsTupleScheme(); } } - private static class abort_txn_argsTupleScheme extends TupleScheme { + private static class get_open_write_ids_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -176449,11 +179185,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_args stru } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new 
AbortTxnRequest();
+        struct.rqst = new GetOpenWriteIdsRequest();
         struct.rqst.read(iprot);
         struct.setRqstIsSet(true);
       }
@@ -176462,22 +179198,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struc
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txn_result");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_open_write_ids_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_open_write_ids_result");

+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2);

     private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new abort_txn_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new abort_txn_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_open_write_ids_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_open_write_ids_resultTupleSchemeFactory());
     }

+    private GetOpenWriteIdsResponse success; // required
     private NoSuchTxnException o1; // required
+    private MetaException o2; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      O1((short)1, "o1");
+      SUCCESS((short)0, "success"),
+      O1((short)1, "o1"),
+      O2((short)2, "o2");

       private static final Map byName = new HashMap();

@@ -176492,8 +179234,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_args struc
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
           case 1: // O1
             return O1;
+          case 2: // O2
+            return O2;
           default:
             return null;
         }
@@ -176537,38 +179283,77 @@ public String getFieldName() {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetOpenWriteIdsResponse.class)));
       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txn_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_open_write_ids_result.class, metaDataMap);
     }

-    public abort_txn_result() {
+    public get_open_write_ids_result() {
     }

-    public abort_txn_result(
-      NoSuchTxnException o1)
+    public get_open_write_ids_result(
+      GetOpenWriteIdsResponse success,
+      NoSuchTxnException o1,
+      MetaException o2)
     {
       this();
+      this.success = success;
       this.o1 = o1;
+      this.o2 = o2;
     }

     /**
      * Performs a deep copy on other.
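// A minimal illustrative sketch, not part of this hunk: unlike the void calls above,
// get_open_write_ids returns a value, so get_open_write_ids_result pairs a success field
// (GetOpenWriteIdsResponse) with the declared exceptions o1 and o2. The standard
// Thrift-generated receive path (assumed here, not shown in these hunks) is roughly:
public GetOpenWriteIdsResponse recv_get_open_write_ids() throws NoSuchTxnException, MetaException, org.apache.thrift.TException
{
  get_open_write_ids_result result = new get_open_write_ids_result();
  receiveBase(result, "get_open_write_ids");
  if (result.isSetSuccess()) {
    return result.success;         // the normal return value
  }
  if (result.o1 != null) {
    throw result.o1;               // NoSuchTxnException
  }
  if (result.o2 != null) {
    throw result.o2;               // MetaException
  }
  throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT,
      "get_open_write_ids failed: unknown result");
}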
*/ - public abort_txn_result(abort_txn_result other) { + public get_open_write_ids_result(get_open_write_ids_result other) { + if (other.isSetSuccess()) { + this.success = new GetOpenWriteIdsResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchTxnException(other.o1); } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } } - public abort_txn_result deepCopy() { - return new abort_txn_result(this); + public get_open_write_ids_result deepCopy() { + return new get_open_write_ids_result(this); } @Override public void clear() { + this.success = null; this.o1 = null; + this.o2 = null; + } + + public GetOpenWriteIdsResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetOpenWriteIdsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public NoSuchTxnException getO1() { @@ -176594,8 +179379,39 @@ public void setO1IsSet(boolean value) { } } + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetOpenWriteIdsResponse)value); + } + break; + case O1: if (value == null) { unsetO1(); @@ -176604,14 +179420,28 @@ public void setFieldValue(_Fields field, Object value) { } break; + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); + case O2: + return getO2(); + } throw new IllegalStateException(); } @@ -176623,8 +179453,12 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); + case O2: + return isSetO2(); } throw new IllegalStateException(); } @@ -176633,15 +179467,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txn_result) - return this.equals((abort_txn_result)that); + if (that instanceof get_open_write_ids_result) + return this.equals((get_open_write_ids_result)that); return false; } - public boolean equals(abort_txn_result that) { + public boolean equals(get_open_write_ids_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -176651,6 +179494,15 @@ public boolean equals(abort_txn_result that) { return false; } + boolean this_present_o2 = true && 
this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + return true; } @@ -176658,22 +179510,42 @@ public boolean equals(abort_txn_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + return list.hashCode(); } @Override - public int compareTo(abort_txn_result other) { + public int compareTo(get_open_write_ids_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -176684,6 +179556,16 @@ public int compareTo(abort_txn_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -176701,9 +179583,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txn_result("); + StringBuilder sb = new StringBuilder("get_open_write_ids_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -176711,6 +179601,14 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; sb.append(")"); return sb.toString(); } @@ -176718,6 +179616,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -176736,15 +179637,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txn_resultStandardSchemeFactory implements SchemeFactory { - public abort_txn_resultStandardScheme getScheme() { - return new abort_txn_resultStandardScheme(); + private static class get_open_write_ids_resultStandardSchemeFactory implements SchemeFactory { + public get_open_write_ids_resultStandardScheme getScheme() { + return new get_open_write_ids_resultStandardScheme(); } } - private static class abort_txn_resultStandardScheme extends StandardScheme { + private static class get_open_write_ids_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_open_write_ids_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176754,6 +179655,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result st break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetOpenWriteIdsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new NoSuchTxnException(); @@ -176763,6 +179673,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -176772,68 +179691,100 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txn_result st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_open_write_ids_result struct) throws org.apache.thrift.TException { 
struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class abort_txn_resultTupleSchemeFactory implements SchemeFactory { - public abort_txn_resultTupleScheme getScheme() { - return new abort_txn_resultTupleScheme(); + private static class get_open_write_ids_resultTupleSchemeFactory implements SchemeFactory { + public get_open_write_ids_resultTupleScheme getScheme() { + return new get_open_write_ids_resultTupleScheme(); } } - private static class abort_txn_resultTupleScheme extends TupleScheme { + private static class get_open_write_ids_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_open_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.success = new GetOpenWriteIdsResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_transactional_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_transactional_table_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txns_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_transactional_table_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_transactional_table_argsTupleSchemeFactory()); } - private AbortTxnsRequest rqst; // required + private AddTransactionalTableRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176898,16 +179849,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AbortTxnsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddTransactionalTableRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_transactional_table_args.class, metaDataMap); } - public abort_txns_args() { + public add_transactional_table_args() { } - public abort_txns_args( - AbortTxnsRequest rqst) + public add_transactional_table_args( + AddTransactionalTableRequest rqst) { this(); this.rqst = rqst; @@ -176916,14 +179867,14 @@ public abort_txns_args( /** * Performs a deep copy on other. 
*/ - public abort_txns_args(abort_txns_args other) { + public add_transactional_table_args(add_transactional_table_args other) { if (other.isSetRqst()) { - this.rqst = new AbortTxnsRequest(other.rqst); + this.rqst = new AddTransactionalTableRequest(other.rqst); } } - public abort_txns_args deepCopy() { - return new abort_txns_args(this); + public add_transactional_table_args deepCopy() { + return new add_transactional_table_args(this); } @Override @@ -176931,11 +179882,11 @@ public void clear() { this.rqst = null; } - public AbortTxnsRequest getRqst() { + public AddTransactionalTableRequest getRqst() { return this.rqst; } - public void setRqst(AbortTxnsRequest rqst) { + public void setRqst(AddTransactionalTableRequest rqst) { this.rqst = rqst; } @@ -176960,7 +179911,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((AbortTxnsRequest)value); + setRqst((AddTransactionalTableRequest)value); } break; @@ -176993,12 +179944,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txns_args) - return this.equals((abort_txns_args)that); + if (that instanceof add_transactional_table_args) + return this.equals((add_transactional_table_args)that); return false; } - public boolean equals(abort_txns_args that) { + public boolean equals(add_transactional_table_args that) { if (that == null) return false; @@ -177027,7 +179978,7 @@ public int hashCode() { } @Override - public int compareTo(abort_txns_args other) { + public int compareTo(add_transactional_table_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -177061,7 +180012,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txns_args("); + StringBuilder sb = new StringBuilder("add_transactional_table_args("); boolean first = true; sb.append("rqst:"); @@ -177099,15 +180050,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txns_argsStandardSchemeFactory implements SchemeFactory { - public abort_txns_argsStandardScheme getScheme() { - return new abort_txns_argsStandardScheme(); + private static class add_transactional_table_argsStandardSchemeFactory implements SchemeFactory { + public add_transactional_table_argsStandardScheme getScheme() { + return new add_transactional_table_argsStandardScheme(); } } - private static class abort_txns_argsStandardScheme extends StandardScheme { + private static class add_transactional_table_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_transactional_table_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177119,7 +180070,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args str switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AbortTxnsRequest(); + struct.rqst = new AddTransactionalTableRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -177135,7 +180086,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_transactional_table_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177150,16 +180101,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_args st } - private static class abort_txns_argsTupleSchemeFactory implements SchemeFactory { - public abort_txns_argsTupleScheme getScheme() { - return new abort_txns_argsTupleScheme(); + private static class add_transactional_table_argsTupleSchemeFactory implements SchemeFactory { + public add_transactional_table_argsTupleScheme getScheme() { + return new add_transactional_table_argsTupleScheme(); } } - private static class abort_txns_argsTupleScheme extends TupleScheme { + private static class add_transactional_table_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_transactional_table_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -177172,11 +180123,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_args str } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_transactional_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet 
incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new AbortTxnsRequest(); + struct.rqst = new AddTransactionalTableRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -177185,18 +180136,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_args stru } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class abort_txns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("abort_txns_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_transactional_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_transactional_table_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new abort_txns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new abort_txns_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_transactional_table_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_transactional_table_resultTupleSchemeFactory()); } - private NoSuchTxnException o1; // required + private MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -177263,14 +180214,14 @@ public String getFieldName() { tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(abort_txns_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_transactional_table_result.class, metaDataMap); } - public abort_txns_result() { + public add_transactional_table_result() { } - public abort_txns_result( - NoSuchTxnException o1) + public add_transactional_table_result( + MetaException o1) { this(); this.o1 = o1; @@ -177279,14 +180230,14 @@ public abort_txns_result( /** * Performs a deep copy on other. 
*/ - public abort_txns_result(abort_txns_result other) { + public add_transactional_table_result(add_transactional_table_result other) { if (other.isSetO1()) { - this.o1 = new NoSuchTxnException(other.o1); + this.o1 = new MetaException(other.o1); } } - public abort_txns_result deepCopy() { - return new abort_txns_result(this); + public add_transactional_table_result deepCopy() { + return new add_transactional_table_result(this); } @Override @@ -177294,11 +180245,11 @@ public void clear() { this.o1 = null; } - public NoSuchTxnException getO1() { + public MetaException getO1() { return this.o1; } - public void setO1(NoSuchTxnException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -177323,7 +180274,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchTxnException)value); + setO1((MetaException)value); } break; @@ -177356,12 +180307,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof abort_txns_result) - return this.equals((abort_txns_result)that); + if (that instanceof add_transactional_table_result) + return this.equals((add_transactional_table_result)that); return false; } - public boolean equals(abort_txns_result that) { + public boolean equals(add_transactional_table_result that) { if (that == null) return false; @@ -177390,7 +180341,7 @@ public int hashCode() { } @Override - public int compareTo(abort_txns_result other) { + public int compareTo(add_transactional_table_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -177424,7 +180375,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("abort_txns_result("); + StringBuilder sb = new StringBuilder("add_transactional_table_result("); boolean first = true; sb.append("o1:"); @@ -177459,15 +180410,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class abort_txns_resultStandardSchemeFactory implements SchemeFactory { - public abort_txns_resultStandardScheme getScheme() { - return new abort_txns_resultStandardScheme(); + private static class add_transactional_table_resultStandardSchemeFactory implements SchemeFactory { + public add_transactional_table_resultStandardScheme getScheme() { + return new add_transactional_table_resultStandardScheme(); } } - private static class abort_txns_resultStandardScheme extends StandardScheme { + private static class add_transactional_table_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_transactional_table_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177479,7 +180430,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result s switch (schemeField.id) { case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchTxnException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -177495,7 +180446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, abort_txns_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_transactional_table_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177510,16 +180461,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, abort_txns_result } - private static class abort_txns_resultTupleSchemeFactory implements SchemeFactory { - public abort_txns_resultTupleScheme getScheme() { - return new abort_txns_resultTupleScheme(); + private static class add_transactional_table_resultTupleSchemeFactory implements SchemeFactory { + public add_transactional_table_resultTupleScheme getScheme() { + return new add_transactional_table_resultTupleScheme(); } } - private static class abort_txns_resultTupleScheme extends TupleScheme { + private static class add_transactional_table_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_transactional_table_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -177532,11 +180483,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, abort_txns_result s } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_transactional_table_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = 
(TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.o1 = new NoSuchTxnException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } @@ -177545,18 +180496,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, abort_txns_result st } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("allocate_table_write_id_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new commit_txn_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new commit_txn_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new allocate_table_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new allocate_table_write_id_argsTupleSchemeFactory()); } - private CommitTxnRequest rqst; // required + private AllocateTableWriteIdRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -177621,16 +180572,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AllocateTableWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(allocate_table_write_id_args.class, metaDataMap); } - public commit_txn_args() { + public allocate_table_write_id_args() { } - public commit_txn_args( - CommitTxnRequest rqst) + public allocate_table_write_id_args( + AllocateTableWriteIdRequest rqst) { this(); this.rqst = rqst; @@ -177639,14 +180590,14 @@ public commit_txn_args( /** * Performs a deep copy on other. 
*/ - public commit_txn_args(commit_txn_args other) { + public allocate_table_write_id_args(allocate_table_write_id_args other) { if (other.isSetRqst()) { - this.rqst = new CommitTxnRequest(other.rqst); + this.rqst = new AllocateTableWriteIdRequest(other.rqst); } } - public commit_txn_args deepCopy() { - return new commit_txn_args(this); + public allocate_table_write_id_args deepCopy() { + return new allocate_table_write_id_args(this); } @Override @@ -177654,11 +180605,11 @@ public void clear() { this.rqst = null; } - public CommitTxnRequest getRqst() { + public AllocateTableWriteIdRequest getRqst() { return this.rqst; } - public void setRqst(CommitTxnRequest rqst) { + public void setRqst(AllocateTableWriteIdRequest rqst) { this.rqst = rqst; } @@ -177683,7 +180634,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((CommitTxnRequest)value); + setRqst((AllocateTableWriteIdRequest)value); } break; @@ -177716,12 +180667,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof commit_txn_args) - return this.equals((commit_txn_args)that); + if (that instanceof allocate_table_write_id_args) + return this.equals((allocate_table_write_id_args)that); return false; } - public boolean equals(commit_txn_args that) { + public boolean equals(allocate_table_write_id_args that) { if (that == null) return false; @@ -177750,7 +180701,7 @@ public int hashCode() { } @Override - public int compareTo(commit_txn_args other) { + public int compareTo(allocate_table_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -177784,7 +180735,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("commit_txn_args("); + StringBuilder sb = new StringBuilder("allocate_table_write_id_args("); boolean first = true; sb.append("rqst:"); @@ -177822,15 +180773,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class commit_txn_argsStandardSchemeFactory implements SchemeFactory { - public commit_txn_argsStandardScheme getScheme() { - return new commit_txn_argsStandardScheme(); + private static class allocate_table_write_id_argsStandardSchemeFactory implements SchemeFactory { + public allocate_table_write_id_argsStandardScheme getScheme() { + return new allocate_table_write_id_argsStandardScheme(); } } - private static class commit_txn_argsStandardScheme extends StandardScheme { + private static class allocate_table_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177842,7 +180793,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args str switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new CommitTxnRequest(); + struct.rqst = new AllocateTableWriteIdRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -177858,7 +180809,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177873,16 +180824,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_args st } - private static class commit_txn_argsTupleSchemeFactory implements SchemeFactory { - public commit_txn_argsTupleScheme getScheme() { - return new commit_txn_argsTupleScheme(); + private static class allocate_table_write_id_argsTupleSchemeFactory implements SchemeFactory { + public allocate_table_write_id_argsTupleScheme getScheme() { + return new allocate_table_write_id_argsTupleScheme(); } } - private static class commit_txn_argsTupleScheme extends TupleScheme { + private static class allocate_table_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -177895,11 +180846,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_args str } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet 
incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new CommitTxnRequest(); + struct.rqst = new AllocateTableWriteIdRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -177908,25 +180859,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args stru } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class commit_txn_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("commit_txn_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class allocate_table_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("allocate_table_write_id_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new commit_txn_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new commit_txn_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new allocate_table_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new allocate_table_write_id_resultTupleSchemeFactory()); } + private AllocateTableWriteIdResponse success; // required private NoSuchTxnException o1; // required private TxnAbortedException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -177941,10 +180898,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_args stru */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // O1 return O1; case 2: // O2 return O2; + case 3: // O3 + return O3; default: return null; } @@ -177988,46 +180949,85 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AllocateTableWriteIdResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(commit_txn_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(allocate_table_write_id_result.class, metaDataMap); } - public commit_txn_result() { + public allocate_table_write_id_result() { } - public commit_txn_result( + public allocate_table_write_id_result( + AllocateTableWriteIdResponse success, NoSuchTxnException o1, - TxnAbortedException o2) + TxnAbortedException o2, + MetaException o3) { this(); + this.success = success; this.o1 = o1; this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public commit_txn_result(commit_txn_result other) { + public allocate_table_write_id_result(allocate_table_write_id_result other) { + if (other.isSetSuccess()) { + this.success = new AllocateTableWriteIdResponse(other.success); + } if (other.isSetO1()) { this.o1 = new NoSuchTxnException(other.o1); } if (other.isSetO2()) { this.o2 = new TxnAbortedException(other.o2); } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } } - public commit_txn_result deepCopy() { - return new commit_txn_result(this); + public allocate_table_write_id_result deepCopy() { + return new allocate_table_write_id_result(this); } @Override public void clear() { + this.success = null; this.o1 = null; this.o2 = null; + this.o3 = null; + } + + public AllocateTableWriteIdResponse getSuccess() { + return this.success; + } + + public void setSuccess(AllocateTableWriteIdResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public NoSuchTxnException getO1() { @@ -178076,8 +181076,39 @@ public void setO2IsSet(boolean value) { } } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((AllocateTableWriteIdResponse)value); + } + break; + case O1: if (value == null) { unsetO1(); @@ -178094,17 +181125,31 @@ public void setFieldValue(_Fields field, Object value) { } break; + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); case O2: return getO2(); + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -178116,10 +181161,14 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -178128,15 +181177,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof commit_txn_result) - return this.equals((commit_txn_result)that); + if (that instanceof allocate_table_write_id_result) + return this.equals((allocate_table_write_id_result)that); return false; } - public boolean equals(commit_txn_result that) { + public boolean equals(allocate_table_write_id_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean 
that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -178155,6 +181213,15 @@ public boolean equals(commit_txn_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -178162,6 +181229,11 @@ public boolean equals(commit_txn_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -178172,17 +181244,32 @@ public int hashCode() { if (present_o2) list.add(o2); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(commit_txn_result other) { + public int compareTo(allocate_table_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -178203,6 +181290,16 @@ public int compareTo(commit_txn_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -178220,9 +181317,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("commit_txn_result("); + StringBuilder sb = new StringBuilder("allocate_table_write_id_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -178238,6 +181343,14 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -178245,6 +181358,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -178263,15 +181379,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class commit_txn_resultStandardSchemeFactory implements SchemeFactory { - public commit_txn_resultStandardScheme getScheme() { - return new commit_txn_resultStandardScheme(); + private static class allocate_table_write_id_resultStandardSchemeFactory implements SchemeFactory { + public allocate_table_write_id_resultStandardScheme getScheme() { + return new allocate_table_write_id_resultStandardScheme(); } } - private static class commit_txn_resultStandardScheme extends StandardScheme { + private static class allocate_table_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -178281,6 +181397,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new AllocateTableWriteIdResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new NoSuchTxnException(); @@ -178299,6 +181424,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -178308,10 +181442,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, commit_txn_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, allocate_table_write_id_result struct) 
throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -178322,53 +181461,80 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, commit_txn_result struct.o2.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class commit_txn_resultTupleSchemeFactory implements SchemeFactory { - public commit_txn_resultTupleScheme getScheme() { - return new commit_txn_resultTupleScheme(); + private static class allocate_table_write_id_resultTupleSchemeFactory implements SchemeFactory { + public allocate_table_write_id_resultTupleScheme getScheme() { + return new allocate_table_write_id_resultTupleScheme(); } } - private static class commit_txn_resultTupleScheme extends TupleScheme { + private static class allocate_table_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, commit_txn_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, allocate_table_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { + struct.success = new AllocateTableWriteIdResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new TxnAbortedException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } @@ -186956,13 +190122,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_last_completed_ case 1: // DB_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); - struct.db_names = new ArrayList(_list1402.size); - String _elem1403; - for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) + org.apache.thrift.protocol.TList _list1442 = iprot.readListBegin(); + struct.db_names = new ArrayList(_list1442.size); + 
String _elem1443; + for (int _i1444 = 0; _i1444 < _list1442.size; ++_i1444) { - _elem1403 = iprot.readString(); - struct.db_names.add(_elem1403); + _elem1443 = iprot.readString(); + struct.db_names.add(_elem1443); } iprot.readListEnd(); } @@ -186974,13 +190140,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_last_completed_ case 2: // TABLE_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1405 = iprot.readListBegin(); - struct.table_names = new ArrayList(_list1405.size); - String _elem1406; - for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) + org.apache.thrift.protocol.TList _list1445 = iprot.readListBegin(); + struct.table_names = new ArrayList(_list1445.size); + String _elem1446; + for (int _i1447 = 0; _i1447 < _list1445.size; ++_i1447) { - _elem1406 = iprot.readString(); - struct.table_names.add(_elem1406); + _elem1446 = iprot.readString(); + struct.table_names.add(_elem1446); } iprot.readListEnd(); } @@ -187015,9 +190181,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_last_completed oprot.writeFieldBegin(DB_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.db_names.size())); - for (String _iter1408 : struct.db_names) + for (String _iter1448 : struct.db_names) { - oprot.writeString(_iter1408); + oprot.writeString(_iter1448); } oprot.writeListEnd(); } @@ -187027,9 +190193,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_last_completed oprot.writeFieldBegin(TABLE_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.table_names.size())); - for (String _iter1409 : struct.table_names) + for (String _iter1449 : struct.table_names) { - oprot.writeString(_iter1409); + oprot.writeString(_iter1449); } oprot.writeListEnd(); } @@ -187071,18 +190237,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_last_completed_ if (struct.isSetDb_names()) { { oprot.writeI32(struct.db_names.size()); - for (String _iter1410 : struct.db_names) + for (String _iter1450 : struct.db_names) { - oprot.writeString(_iter1410); + oprot.writeString(_iter1450); } } } if (struct.isSetTable_names()) { { oprot.writeI32(struct.table_names.size()); - for (String _iter1411 : struct.table_names) + for (String _iter1451 : struct.table_names) { - oprot.writeString(_iter1411); + oprot.writeString(_iter1451); } } } @@ -187097,26 +190263,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_last_completed_t BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1412 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.db_names = new ArrayList(_list1412.size); - String _elem1413; - for (int _i1414 = 0; _i1414 < _list1412.size; ++_i1414) + org.apache.thrift.protocol.TList _list1452 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.db_names = new ArrayList(_list1452.size); + String _elem1453; + for (int _i1454 = 0; _i1454 < _list1452.size; ++_i1454) { - _elem1413 = iprot.readString(); - struct.db_names.add(_elem1413); + _elem1453 = iprot.readString(); + struct.db_names.add(_elem1453); } } struct.setDb_namesIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1415 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.table_names = new ArrayList(_list1415.size); - String _elem1416; - for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) + org.apache.thrift.protocol.TList _list1455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.table_names = new ArrayList(_list1455.size); + String _elem1456; + for (int _i1457 = 0; _i1457 < _list1455.size; ++_i1457) { - _elem1416 = iprot.readString(); - struct.table_names.add(_elem1416); + _elem1456 = iprot.readString(); + struct.table_names.add(_elem1456); } } struct.setTable_namesIsSet(true); @@ -187446,14 +190612,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_last_completed_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin(); - struct.success = new ArrayList(_list1418.size); - BasicTxnInfo _elem1419; - for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420) + org.apache.thrift.protocol.TList _list1458 = iprot.readListBegin(); + struct.success = new ArrayList(_list1458.size); + BasicTxnInfo _elem1459; + for (int _i1460 = 0; _i1460 < _list1458.size; ++_i1460) { - _elem1419 = new BasicTxnInfo(); - _elem1419.read(iprot); - struct.success.add(_elem1419); + _elem1459 = new BasicTxnInfo(); + _elem1459.read(iprot); + struct.success.add(_elem1459); } iprot.readListEnd(); } @@ -187479,9 +190645,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_last_completed oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (BasicTxnInfo _iter1421 : struct.success) + for (BasicTxnInfo _iter1461 : struct.success) { - _iter1421.write(oprot); + _iter1461.write(oprot); } oprot.writeListEnd(); } @@ -187512,9 +190678,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_last_completed_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (BasicTxnInfo _iter1422 : struct.success) + for (BasicTxnInfo _iter1462 : struct.success) { - _iter1422.write(oprot); + _iter1462.write(oprot); } } } @@ -187526,14 +190692,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_last_completed_t BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1423.size); - BasicTxnInfo _elem1424; - for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425) + org.apache.thrift.protocol.TList _list1463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1463.size); + BasicTxnInfo _elem1464; + for (int _i1465 = 0; _i1465 < _list1463.size; ++_i1465) { - _elem1424 = new BasicTxnInfo(); - _elem1424.read(iprot); - struct.success.add(_elem1424); + _elem1464 = new BasicTxnInfo(); + _elem1464.read(iprot); + struct.success.add(_elem1464); } } struct.setSuccessIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java new file mode 100644 index 0000000..4d5f435 --- /dev/null +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnToWriteId.java @@ -0,0 +1,482 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TxnToWriteId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnToWriteId"); + + private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TxnToWriteIdStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TxnToWriteIdTupleSchemeFactory()); + } + + private long txnId; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXN_ID((short)1, "txnId"), + WRITE_ID((short)2, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXN_ID + return TXN_ID; + case 2: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TXNID_ISSET_ID = 0; + private static final int __WRITEID_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TxnToWriteId.class, metaDataMap); + } + + public TxnToWriteId() { + } + + public TxnToWriteId( + long txnId, + long writeId) + { + this(); + this.txnId = txnId; + setTxnIdIsSet(true); + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public TxnToWriteId(TxnToWriteId other) { + __isset_bitfield = other.__isset_bitfield; + this.txnId = other.txnId; + this.writeId = other.writeId; + } + + public TxnToWriteId deepCopy() { + return new TxnToWriteId(this); + } + + @Override + public void clear() { + setTxnIdIsSet(false); + this.txnId = 0; + setWriteIdIsSet(false); + this.writeId = 0; + } + + public long getTxnId() { + return this.txnId; + } + + public void setTxnId(long txnId) { + this.txnId = txnId; + setTxnIdIsSet(true); + } + + public void unsetTxnId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + /** Returns true if field txnId is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnId() { + return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID); + } + + public void setTxnIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value); + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXN_ID: + if (value == null) { + unsetTxnId(); + } else { + setTxnId((Long)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public 
Object getFieldValue(_Fields field) { + switch (field) { + case TXN_ID: + return getTxnId(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXN_ID: + return isSetTxnId(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TxnToWriteId) + return this.equals((TxnToWriteId)that); + return false; + } + + public boolean equals(TxnToWriteId that) { + if (that == null) + return false; + + boolean this_present_txnId = true; + boolean that_present_txnId = true; + if (this_present_txnId || that_present_txnId) { + if (!(this_present_txnId && that_present_txnId)) + return false; + if (this.txnId != that.txnId) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txnId = true; + list.add(present_txnId); + if (present_txnId) + list.add(txnId); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(TxnToWriteId other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TxnToWriteId("); + boolean first = true; + + sb.append("txnId:"); + sb.append(this.txnId); + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTxnId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnId' is unset! 
Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TxnToWriteIdStandardSchemeFactory implements SchemeFactory { + public TxnToWriteIdStandardScheme getScheme() { + return new TxnToWriteIdStandardScheme(); + } + } + + private static class TxnToWriteIdStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TxnToWriteId struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXN_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TxnToWriteId struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(TXN_ID_FIELD_DESC); + oprot.writeI64(struct.txnId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TxnToWriteIdTupleSchemeFactory implements SchemeFactory { + public TxnToWriteIdTupleScheme getScheme() { + return new TxnToWriteIdTupleScheme(); + } + } + + private static class TxnToWriteIdTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TxnToWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.txnId); + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TxnToWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.txnId = iprot.readI64(); + struct.setTxnIdIsSet(true); + struct.writeId = iprot.readI64(); + 
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnsSnapshot.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnsSnapshot.java index 5600bda..3c47c86 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnsSnapshot.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnsSnapshot.java @@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TxnsSnapshot struct case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list624 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list624.size); - long _elem625; - for (int _i626 = 0; _i626 < _list624.size; ++_i626) + org.apache.thrift.protocol.TList _list664 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list664.size); + long _elem665; + for (int _i666 = 0; _i666 < _list664.size; ++_i666) { - _elem625 = iprot.readI64(); - struct.open_txns.add(_elem625); + _elem665 = iprot.readI64(); + struct.open_txns.add(_elem665); } iprot.readListEnd(); } @@ -479,9 +479,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TxnsSnapshot struc oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter627 : struct.open_txns) + for (long _iter667 : struct.open_txns) { - oprot.writeI64(_iter627); + oprot.writeI64(_iter667); } oprot.writeListEnd(); } @@ -507,9 +507,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TxnsSnapshot struct oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter628 : struct.open_txns) + for (long _iter668 : struct.open_txns) { - oprot.writeI64(_iter628); + oprot.writeI64(_iter668); } } } @@ -520,13 +520,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TxnsSnapshot struct) struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list629 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new ArrayList(_list629.size); - long _elem630; - for (int _i631 = 0; _i631 < _list629.size; ++_i631) + org.apache.thrift.protocol.TList _list669 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new ArrayList(_list669.size); + long _elem670; + for (int _i671 = 0; _i671 < _list669.size; ++_i671) { - _elem630 = iprot.readI64(); - struct.open_txns.add(_elem630); + _elem670 = iprot.readI64(); + struct.open_txns.add(_elem670); } } struct.setOpen_txnsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index c9988c0..9bbfcbf 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type ==
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.pools = new ArrayList(_list764.size); - WMPool _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.pools = new ArrayList(_list804.size); + WMPool _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - _elem765 = new WMPool(); - _elem765.read(iprot); - struct.pools.add(_elem765); + _elem805 = new WMPool(); + _elem805.read(iprot); + struct.pools.add(_elem805); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list767 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list767.size); - WMMapping _elem768; - for (int _i769 = 0; _i769 < _list767.size; ++_i769) + org.apache.thrift.protocol.TList _list807 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list807.size); + WMMapping _elem808; + for (int _i809 = 0; _i809 < _list807.size; ++_i809) { - _elem768 = new WMMapping(); - _elem768.read(iprot); - struct.mappings.add(_elem768); + _elem808 = new WMMapping(); + _elem808.read(iprot); + struct.mappings.add(_elem808); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list770.size); - WMTrigger _elem771; - for (int _i772 = 0; _i772 < _list770.size; ++_i772) + org.apache.thrift.protocol.TList _list810 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list810.size); + WMTrigger _elem811; + for (int _i812 = 0; _i812 < _list810.size; ++_i812) { - _elem771 = new WMTrigger(); - _elem771.read(iprot); - struct.triggers.add(_elem771); + _elem811 = new WMTrigger(); + _elem811.read(iprot); + struct.triggers.add(_elem811); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list773 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list773.size); - WMPoolTrigger _elem774; - for (int _i775 = 0; _i775 < _list773.size; ++_i775) + org.apache.thrift.protocol.TList _list813 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list813.size); + WMPoolTrigger _elem814; + for (int _i815 = 0; _i815 < _list813.size; ++_i815) { - _elem774 = new WMPoolTrigger(); - _elem774.read(iprot); - struct.poolTriggers.add(_elem774); + _elem814 = new WMPoolTrigger(); + _elem814.read(iprot); + struct.poolTriggers.add(_elem814); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter776 : struct.pools) + for (WMPool _iter816 : struct.pools) { - _iter776.write(oprot); + _iter816.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); 
{ oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter777 : struct.mappings) + for (WMMapping _iter817 : struct.mappings) { - _iter777.write(oprot); + _iter817.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter778 : struct.triggers) + for (WMTrigger _iter818 : struct.triggers) { - _iter778.write(oprot); + _iter818.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter779 : struct.poolTriggers) + for (WMPoolTrigger _iter819 : struct.poolTriggers) { - _iter779.write(oprot); + _iter819.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter780 : struct.pools) + for (WMPool _iter820 : struct.pools) { - _iter780.write(oprot); + _iter820.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter781 : struct.mappings) + for (WMMapping _iter821 : struct.mappings) { - _iter781.write(oprot); + _iter821.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter782 : struct.triggers) + for (WMTrigger _iter822 : struct.triggers) { - _iter782.write(oprot); + _iter822.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter783 : struct.poolTriggers) + for (WMPoolTrigger _iter823 : struct.poolTriggers) { - _iter783.write(oprot); + _iter823.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list784 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list784.size); - WMPool _elem785; - for (int _i786 = 0; _i786 < _list784.size; ++_i786) + org.apache.thrift.protocol.TList _list824 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list824.size); + WMPool _elem825; + for (int _i826 = 0; _i826 < _list824.size; ++_i826) { - _elem785 = new WMPool(); - _elem785.read(iprot); - struct.pools.add(_elem785); + _elem825 = new WMPool(); + _elem825.read(iprot); + struct.pools.add(_elem825); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list787 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list787.size); - WMMapping _elem788; - for (int _i789 = 0; _i789 < _list787.size; ++_i789) + 
org.apache.thrift.protocol.TList _list827 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list827.size); + WMMapping _elem828; + for (int _i829 = 0; _i829 < _list827.size; ++_i829) { - _elem788 = new WMMapping(); - _elem788.read(iprot); - struct.mappings.add(_elem788); + _elem828 = new WMMapping(); + _elem828.read(iprot); + struct.mappings.add(_elem828); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list790 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list790.size); - WMTrigger _elem791; - for (int _i792 = 0; _i792 < _list790.size; ++_i792) + org.apache.thrift.protocol.TList _list830 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list830.size); + WMTrigger _elem831; + for (int _i832 = 0; _i832 < _list830.size; ++_i832) { - _elem791 = new WMTrigger(); - _elem791.read(iprot); - struct.triggers.add(_elem791); + _elem831 = new WMTrigger(); + _elem831.read(iprot); + struct.triggers.add(_elem831); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list793.size); - WMPoolTrigger _elem794; - for (int _i795 = 0; _i795 < _list793.size; ++_i795) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list833.size); + WMPoolTrigger _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem794 = new WMPoolTrigger(); - _elem794.read(iprot); - struct.poolTriggers.add(_elem794); + _elem834 = new WMPoolTrigger(); + _elem834.read(iprot); + struct.poolTriggers.add(_elem834); } } struct.setPoolTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index fb96ad9..03a8d19 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list796.size); - WMResourcePlan _elem797; - for (int _i798 = 0; _i798 < _list796.size; ++_i798) + org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list836.size); + WMResourcePlan _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem797 = new WMResourcePlan(); - _elem797.read(iprot); - struct.resourcePlans.add(_elem797); + _elem837 = new WMResourcePlan(); + _elem837.read(iprot); + struct.resourcePlans.add(_elem837); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl 
oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter799 : struct.resourcePlans) + for (WMResourcePlan _iter839 : struct.resourcePlans) { - _iter799.write(oprot); + _iter839.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter800 : struct.resourcePlans) + for (WMResourcePlan _iter840 : struct.resourcePlans) { - _iter800.write(oprot); + _iter840.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list801.size); - WMResourcePlan _elem802; - for (int _i803 = 0; _i803 < _list801.size; ++_i803) + org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list841.size); + WMResourcePlan _elem842; + for (int _i843 = 0; _i843 < _list841.size; ++_i843) { - _elem802 = new WMResourcePlan(); - _elem802.read(iprot); - struct.resourcePlans.add(_elem802); + _elem842 = new WMResourcePlan(); + _elem842.read(iprot); + struct.resourcePlans.add(_elem842); } } struct.setResourcePlansIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index 4d4894a..9a89f01 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list820.size); - WMTrigger _elem821; - for (int _i822 = 0; _i822 < _list820.size; ++_i822) + org.apache.thrift.protocol.TList _list860 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list860.size); + WMTrigger _elem861; + for (int _i862 = 0; _i862 < _list860.size; ++_i862) { - _elem821 = new WMTrigger(); - _elem821.read(iprot); - struct.triggers.add(_elem821); + _elem861 = new WMTrigger(); + _elem861.read(iprot); + struct.triggers.add(_elem861); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter823 : struct.triggers) + for (WMTrigger _iter863 : struct.triggers) { - _iter823.write(oprot); + _iter863.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter824 : struct.triggers) + for (WMTrigger _iter864 : struct.triggers) { - _iter824.write(oprot); + _iter864.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list825.size); - WMTrigger _elem826; - for (int _i827 = 0; _i827 < _list825.size; ++_i827) + org.apache.thrift.protocol.TList _list865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list865.size); + WMTrigger _elem866; + for (int _i867 = 0; _i867 < _list865.size; ++_i867) { - _elem826 = new WMTrigger(); - _elem826.read(iprot); - struct.triggers.add(_elem826); + _elem866 = new WMTrigger(); + _elem866.read(iprot); + struct.triggers.add(_elem866); } } struct.setTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index ea8f3aa..4e57dc7 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); - struct.errors = new ArrayList(_list804.size); - String _elem805; - for (int _i806 = 0; _i806 < _list804.size; ++_i806) + org.apache.thrift.protocol.TList _list844 = iprot.readListBegin(); + struct.errors = new ArrayList(_list844.size); + String _elem845; + for (int _i846 = 0; _i846 < _list844.size; ++_i846) { - _elem805 = iprot.readString(); - struct.errors.add(_elem805); + _elem845 = iprot.readString(); + struct.errors.add(_elem845); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list807 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list807.size); - String _elem808; - for (int _i809 = 0; _i809 < _list807.size; ++_i809) + org.apache.thrift.protocol.TList _list847 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list847.size); + String _elem848; + for (int _i849 = 0; _i849 < _list847.size; ++_i849) { - _elem808 = iprot.readString(); - struct.warnings.add(_elem808); + _elem848 = iprot.readString(); + struct.warnings.add(_elem848); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter810 : struct.errors) + for (String _iter850 : struct.errors) { - oprot.writeString(_iter810); + 
oprot.writeString(_iter850); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter811 : struct.warnings) + for (String _iter851 : struct.warnings) { - oprot.writeString(_iter811); + oprot.writeString(_iter851); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter812 : struct.errors) + for (String _iter852 : struct.errors) { - oprot.writeString(_iter812); + oprot.writeString(_iter852); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter813 : struct.warnings) + for (String _iter853 : struct.warnings) { - oprot.writeString(_iter813); + oprot.writeString(_iter853); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list814 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list814.size); - String _elem815; - for (int _i816 = 0; _i816 < _list814.size; ++_i816) + org.apache.thrift.protocol.TList _list854 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list854.size); + String _elem855; + for (int _i856 = 0; _i856 < _list854.size; ++_i856) { - _elem815 = iprot.readString(); - struct.errors.add(_elem815); + _elem855 = iprot.readString(); + struct.errors.add(_elem855); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list817.size); - String _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list857.size); + String _elem858; + for (int _i859 = 0; _i859 < _list857.size; ++_i859) { - _elem818 = iprot.readString(); - struct.warnings.add(_elem818); + _elem858 = iprot.readString(); + struct.warnings.add(_elem858); } } struct.setWarningsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index bf7d466..516bb5a 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1147,6 +1147,26 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function commit_txn(\metastore\CommitTxnRequest $rqst); /** + * @param \metastore\GetOpenWriteIdsRequest $rqst + * @return \metastore\GetOpenWriteIdsResponse + * @throws \metastore\NoSuchTxnException + * @throws \metastore\MetaException + */ + public function get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst); + /** + * @param \metastore\AddTransactionalTableRequest $rqst + * @throws \metastore\MetaException + */ + public function 
add_transactional_table(\metastore\AddTransactionalTableRequest $rqst); + /** + * @param \metastore\AllocateTableWriteIdRequest $rqst + * @return \metastore\AllocateTableWriteIdResponse + * @throws \metastore\NoSuchTxnException + * @throws \metastore\TxnAbortedException + * @throws \metastore\MetaException + */ + public function allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst); + /** * @param \metastore\LockRequest $rqst * @return \metastore\LockResponse * @throws \metastore\NoSuchTxnException @@ -9567,6 +9587,174 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst) + { + $this->send_get_open_write_ids($rqst); + return $this->recv_get_open_write_ids(); + } + + public function send_get_open_write_ids(\metastore\GetOpenWriteIdsRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_get_open_write_ids_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_open_write_ids', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_open_write_ids', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_open_write_ids() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_open_write_ids_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_open_write_ids_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_open_write_ids failed: unknown result"); + } + + public function add_transactional_table(\metastore\AddTransactionalTableRequest $rqst) + { + $this->send_add_transactional_table($rqst); + $this->recv_add_transactional_table(); + } + + public function send_add_transactional_table(\metastore\AddTransactionalTableRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_add_transactional_table_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'add_transactional_table', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('add_transactional_table', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_add_transactional_table() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_transactional_table_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_add_transactional_table_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + return; + } + + public function allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst) + { + $this->send_allocate_table_write_id($rqst); + return $this->recv_allocate_table_write_id(); + } + + public function send_allocate_table_write_id(\metastore\AllocateTableWriteIdRequest $rqst) + { + $args = new \metastore\ThriftHiveMetastore_allocate_table_write_id_args(); + $args->rqst = $rqst; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'allocate_table_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('allocate_table_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_allocate_table_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_allocate_table_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_allocate_table_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + throw new \Exception("allocate_table_write_id failed: unknown result"); + } + public function lock(\metastore\LockRequest $rqst) { $this->send_lock($rqst); @@ -12984,14 +13172,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size732 = 0; - $_etype735 = 0; - $xfer += $input->readListBegin($_etype735, $_size732); - for ($_i736 = 0; $_i736 < $_size732; ++$_i736) + $_size767 = 0; + $_etype770 = 0; + $xfer += $input->readListBegin($_etype770, $_size767); + for ($_i771 = 0; $_i771 < $_size767; ++$_i771) { - $elem737 = null; - $xfer += $input->readString($elem737); - $this->success []= $elem737; + $elem772 = null; + $xfer += $input->readString($elem772); + $this->success []= $elem772; } $xfer += $input->readListEnd(); } else { @@ -13027,9 +13215,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, 
count($this->success)); { - foreach ($this->success as $iter738) + foreach ($this->success as $iter773) { - $xfer += $output->writeString($iter738); + $xfer += $output->writeString($iter773); } } $output->writeListEnd(); @@ -13160,14 +13348,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size739 = 0; - $_etype742 = 0; - $xfer += $input->readListBegin($_etype742, $_size739); - for ($_i743 = 0; $_i743 < $_size739; ++$_i743) + $_size774 = 0; + $_etype777 = 0; + $xfer += $input->readListBegin($_etype777, $_size774); + for ($_i778 = 0; $_i778 < $_size774; ++$_i778) { - $elem744 = null; - $xfer += $input->readString($elem744); - $this->success []= $elem744; + $elem779 = null; + $xfer += $input->readString($elem779); + $this->success []= $elem779; } $xfer += $input->readListEnd(); } else { @@ -13203,9 +13391,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter745) + foreach ($this->success as $iter780) { - $xfer += $output->writeString($iter745); + $xfer += $output->writeString($iter780); } } $output->writeListEnd(); @@ -14206,18 +14394,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size746 = 0; - $_ktype747 = 0; - $_vtype748 = 0; - $xfer += $input->readMapBegin($_ktype747, $_vtype748, $_size746); - for ($_i750 = 0; $_i750 < $_size746; ++$_i750) + $_size781 = 0; + $_ktype782 = 0; + $_vtype783 = 0; + $xfer += $input->readMapBegin($_ktype782, $_vtype783, $_size781); + for ($_i785 = 0; $_i785 < $_size781; ++$_i785) { - $key751 = ''; - $val752 = new \metastore\Type(); - $xfer += $input->readString($key751); - $val752 = new \metastore\Type(); - $xfer += $val752->read($input); - $this->success[$key751] = $val752; + $key786 = ''; + $val787 = new \metastore\Type(); + $xfer += $input->readString($key786); + $val787 = new \metastore\Type(); + $xfer += $val787->read($input); + $this->success[$key786] = $val787; } $xfer += $input->readMapEnd(); } else { @@ -14253,10 +14441,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter753 => $viter754) + foreach ($this->success as $kiter788 => $viter789) { - $xfer += $output->writeString($kiter753); - $xfer += $viter754->write($output); + $xfer += $output->writeString($kiter788); + $xfer += $viter789->write($output); } } $output->writeMapEnd(); @@ -14460,15 +14648,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size755 = 0; - $_etype758 = 0; - $xfer += $input->readListBegin($_etype758, $_size755); - for ($_i759 = 0; $_i759 < $_size755; ++$_i759) + $_size790 = 0; + $_etype793 = 0; + $xfer += $input->readListBegin($_etype793, $_size790); + for ($_i794 = 0; $_i794 < $_size790; ++$_i794) { - $elem760 = null; - $elem760 = new \metastore\FieldSchema(); - $xfer += $elem760->read($input); - $this->success []= $elem760; + $elem795 = null; + $elem795 = new \metastore\FieldSchema(); + $xfer += $elem795->read($input); + $this->success []= $elem795; } $xfer += $input->readListEnd(); } else { @@ -14520,9 +14708,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter761) + foreach ($this->success as $iter796) { - $xfer += $iter761->write($output); + 
$xfer += $iter796->write($output); } } $output->writeListEnd(); @@ -14764,15 +14952,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size762 = 0; - $_etype765 = 0; - $xfer += $input->readListBegin($_etype765, $_size762); - for ($_i766 = 0; $_i766 < $_size762; ++$_i766) + $_size797 = 0; + $_etype800 = 0; + $xfer += $input->readListBegin($_etype800, $_size797); + for ($_i801 = 0; $_i801 < $_size797; ++$_i801) { - $elem767 = null; - $elem767 = new \metastore\FieldSchema(); - $xfer += $elem767->read($input); - $this->success []= $elem767; + $elem802 = null; + $elem802 = new \metastore\FieldSchema(); + $xfer += $elem802->read($input); + $this->success []= $elem802; } $xfer += $input->readListEnd(); } else { @@ -14824,9 +15012,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter768) + foreach ($this->success as $iter803) { - $xfer += $iter768->write($output); + $xfer += $iter803->write($output); } } $output->writeListEnd(); @@ -15040,15 +15228,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size769 = 0; - $_etype772 = 0; - $xfer += $input->readListBegin($_etype772, $_size769); - for ($_i773 = 0; $_i773 < $_size769; ++$_i773) + $_size804 = 0; + $_etype807 = 0; + $xfer += $input->readListBegin($_etype807, $_size804); + for ($_i808 = 0; $_i808 < $_size804; ++$_i808) { - $elem774 = null; - $elem774 = new \metastore\FieldSchema(); - $xfer += $elem774->read($input); - $this->success []= $elem774; + $elem809 = null; + $elem809 = new \metastore\FieldSchema(); + $xfer += $elem809->read($input); + $this->success []= $elem809; } $xfer += $input->readListEnd(); } else { @@ -15100,9 +15288,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter775) + foreach ($this->success as $iter810) { - $xfer += $iter775->write($output); + $xfer += $iter810->write($output); } } $output->writeListEnd(); @@ -15344,15 +15532,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size776 = 0; - $_etype779 = 0; - $xfer += $input->readListBegin($_etype779, $_size776); - for ($_i780 = 0; $_i780 < $_size776; ++$_i780) + $_size811 = 0; + $_etype814 = 0; + $xfer += $input->readListBegin($_etype814, $_size811); + for ($_i815 = 0; $_i815 < $_size811; ++$_i815) { - $elem781 = null; - $elem781 = new \metastore\FieldSchema(); - $xfer += $elem781->read($input); - $this->success []= $elem781; + $elem816 = null; + $elem816 = new \metastore\FieldSchema(); + $xfer += $elem816->read($input); + $this->success []= $elem816; } $xfer += $input->readListEnd(); } else { @@ -15404,9 +15592,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter782) + foreach ($this->success as $iter817) { - $xfer += $iter782->write($output); + $xfer += $iter817->write($output); } } $output->writeListEnd(); @@ -16046,15 +16234,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size783 = 0; - $_etype786 = 0; - $xfer += $input->readListBegin($_etype786, $_size783); - for ($_i787 = 0; $_i787 < 
$_size783; ++$_i787) + $_size818 = 0; + $_etype821 = 0; + $xfer += $input->readListBegin($_etype821, $_size818); + for ($_i822 = 0; $_i822 < $_size818; ++$_i822) { - $elem788 = null; - $elem788 = new \metastore\SQLPrimaryKey(); - $xfer += $elem788->read($input); - $this->primaryKeys []= $elem788; + $elem823 = null; + $elem823 = new \metastore\SQLPrimaryKey(); + $xfer += $elem823->read($input); + $this->primaryKeys []= $elem823; } $xfer += $input->readListEnd(); } else { @@ -16064,15 +16252,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size789 = 0; - $_etype792 = 0; - $xfer += $input->readListBegin($_etype792, $_size789); - for ($_i793 = 0; $_i793 < $_size789; ++$_i793) + $_size824 = 0; + $_etype827 = 0; + $xfer += $input->readListBegin($_etype827, $_size824); + for ($_i828 = 0; $_i828 < $_size824; ++$_i828) { - $elem794 = null; - $elem794 = new \metastore\SQLForeignKey(); - $xfer += $elem794->read($input); - $this->foreignKeys []= $elem794; + $elem829 = null; + $elem829 = new \metastore\SQLForeignKey(); + $xfer += $elem829->read($input); + $this->foreignKeys []= $elem829; } $xfer += $input->readListEnd(); } else { @@ -16082,15 +16270,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size795 = 0; - $_etype798 = 0; - $xfer += $input->readListBegin($_etype798, $_size795); - for ($_i799 = 0; $_i799 < $_size795; ++$_i799) + $_size830 = 0; + $_etype833 = 0; + $xfer += $input->readListBegin($_etype833, $_size830); + for ($_i834 = 0; $_i834 < $_size830; ++$_i834) { - $elem800 = null; - $elem800 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem800->read($input); - $this->uniqueConstraints []= $elem800; + $elem835 = null; + $elem835 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem835->read($input); + $this->uniqueConstraints []= $elem835; } $xfer += $input->readListEnd(); } else { @@ -16100,15 +16288,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size801 = 0; - $_etype804 = 0; - $xfer += $input->readListBegin($_etype804, $_size801); - for ($_i805 = 0; $_i805 < $_size801; ++$_i805) + $_size836 = 0; + $_etype839 = 0; + $xfer += $input->readListBegin($_etype839, $_size836); + for ($_i840 = 0; $_i840 < $_size836; ++$_i840) { - $elem806 = null; - $elem806 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem806->read($input); - $this->notNullConstraints []= $elem806; + $elem841 = null; + $elem841 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem841->read($input); + $this->notNullConstraints []= $elem841; } $xfer += $input->readListEnd(); } else { @@ -16144,9 +16332,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter807) + foreach ($this->primaryKeys as $iter842) { - $xfer += $iter807->write($output); + $xfer += $iter842->write($output); } } $output->writeListEnd(); @@ -16161,9 +16349,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter808) + foreach ($this->foreignKeys as $iter843) { - $xfer += $iter808->write($output); + $xfer += $iter843->write($output); } } $output->writeListEnd(); @@ -16178,9 +16366,9 @@ class 
ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter809) + foreach ($this->uniqueConstraints as $iter844) { - $xfer += $iter809->write($output); + $xfer += $iter844->write($output); } } $output->writeListEnd(); @@ -16195,9 +16383,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter810) + foreach ($this->notNullConstraints as $iter845) { - $xfer += $iter810->write($output); + $xfer += $iter845->write($output); } } $output->writeListEnd(); @@ -17833,14 +18021,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size811 = 0; - $_etype814 = 0; - $xfer += $input->readListBegin($_etype814, $_size811); - for ($_i815 = 0; $_i815 < $_size811; ++$_i815) + $_size846 = 0; + $_etype849 = 0; + $xfer += $input->readListBegin($_etype849, $_size846); + for ($_i850 = 0; $_i850 < $_size846; ++$_i850) { - $elem816 = null; - $xfer += $input->readString($elem816); - $this->partNames []= $elem816; + $elem851 = null; + $xfer += $input->readString($elem851); + $this->partNames []= $elem851; } $xfer += $input->readListEnd(); } else { @@ -17878,9 +18066,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter817) + foreach ($this->partNames as $iter852) { - $xfer += $output->writeString($iter817); + $xfer += $output->writeString($iter852); } } $output->writeListEnd(); @@ -18131,14 +18319,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size818 = 0; - $_etype821 = 0; - $xfer += $input->readListBegin($_etype821, $_size818); - for ($_i822 = 0; $_i822 < $_size818; ++$_i822) + $_size853 = 0; + $_etype856 = 0; + $xfer += $input->readListBegin($_etype856, $_size853); + for ($_i857 = 0; $_i857 < $_size853; ++$_i857) { - $elem823 = null; - $xfer += $input->readString($elem823); - $this->success []= $elem823; + $elem858 = null; + $xfer += $input->readString($elem858); + $this->success []= $elem858; } $xfer += $input->readListEnd(); } else { @@ -18174,9 +18362,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter824) + foreach ($this->success as $iter859) { - $xfer += $output->writeString($iter824); + $xfer += $output->writeString($iter859); } } $output->writeListEnd(); @@ -18378,14 +18566,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size825 = 0; - $_etype828 = 0; - $xfer += $input->readListBegin($_etype828, $_size825); - for ($_i829 = 0; $_i829 < $_size825; ++$_i829) + $_size860 = 0; + $_etype863 = 0; + $xfer += $input->readListBegin($_etype863, $_size860); + for ($_i864 = 0; $_i864 < $_size860; ++$_i864) { - $elem830 = null; - $xfer += $input->readString($elem830); - $this->success []= $elem830; + $elem865 = null; + $xfer += $input->readString($elem865); + $this->success []= $elem865; } $xfer += $input->readListEnd(); } else { @@ -18421,9 +18609,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter831) + foreach ($this->success as $iter866) { - 
$xfer += $output->writeString($iter831); + $xfer += $output->writeString($iter866); } } $output->writeListEnd(); @@ -18579,14 +18767,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size832 = 0; - $_etype835 = 0; - $xfer += $input->readListBegin($_etype835, $_size832); - for ($_i836 = 0; $_i836 < $_size832; ++$_i836) + $_size867 = 0; + $_etype870 = 0; + $xfer += $input->readListBegin($_etype870, $_size867); + for ($_i871 = 0; $_i871 < $_size867; ++$_i871) { - $elem837 = null; - $xfer += $input->readString($elem837); - $this->success []= $elem837; + $elem872 = null; + $xfer += $input->readString($elem872); + $this->success []= $elem872; } $xfer += $input->readListEnd(); } else { @@ -18622,9 +18810,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter838) + foreach ($this->success as $iter873) { - $xfer += $output->writeString($iter838); + $xfer += $output->writeString($iter873); } } $output->writeListEnd(); @@ -18729,14 +18917,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size839 = 0; - $_etype842 = 0; - $xfer += $input->readListBegin($_etype842, $_size839); - for ($_i843 = 0; $_i843 < $_size839; ++$_i843) + $_size874 = 0; + $_etype877 = 0; + $xfer += $input->readListBegin($_etype877, $_size874); + for ($_i878 = 0; $_i878 < $_size874; ++$_i878) { - $elem844 = null; - $xfer += $input->readString($elem844); - $this->tbl_types []= $elem844; + $elem879 = null; + $xfer += $input->readString($elem879); + $this->tbl_types []= $elem879; } $xfer += $input->readListEnd(); } else { @@ -18774,9 +18962,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter845) + foreach ($this->tbl_types as $iter880) { - $xfer += $output->writeString($iter845); + $xfer += $output->writeString($iter880); } } $output->writeListEnd(); @@ -18853,15 +19041,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size846 = 0; - $_etype849 = 0; - $xfer += $input->readListBegin($_etype849, $_size846); - for ($_i850 = 0; $_i850 < $_size846; ++$_i850) + $_size881 = 0; + $_etype884 = 0; + $xfer += $input->readListBegin($_etype884, $_size881); + for ($_i885 = 0; $_i885 < $_size881; ++$_i885) { - $elem851 = null; - $elem851 = new \metastore\TableMeta(); - $xfer += $elem851->read($input); - $this->success []= $elem851; + $elem886 = null; + $elem886 = new \metastore\TableMeta(); + $xfer += $elem886->read($input); + $this->success []= $elem886; } $xfer += $input->readListEnd(); } else { @@ -18897,9 +19085,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter852) + foreach ($this->success as $iter887) { - $xfer += $iter852->write($output); + $xfer += $iter887->write($output); } } $output->writeListEnd(); @@ -19055,14 +19243,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size853 = 0; - $_etype856 = 0; - $xfer += $input->readListBegin($_etype856, $_size853); - for ($_i857 = 0; $_i857 < $_size853; ++$_i857) + $_size888 = 0; + $_etype891 = 0; + $xfer += $input->readListBegin($_etype891, $_size888); + for 
($_i892 = 0; $_i892 < $_size888; ++$_i892) { - $elem858 = null; - $xfer += $input->readString($elem858); - $this->success []= $elem858; + $elem893 = null; + $xfer += $input->readString($elem893); + $this->success []= $elem893; } $xfer += $input->readListEnd(); } else { @@ -19098,9 +19286,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter859) + foreach ($this->success as $iter894) { - $xfer += $output->writeString($iter859); + $xfer += $output->writeString($iter894); } } $output->writeListEnd(); @@ -19415,14 +19603,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size860 = 0; - $_etype863 = 0; - $xfer += $input->readListBegin($_etype863, $_size860); - for ($_i864 = 0; $_i864 < $_size860; ++$_i864) + $_size895 = 0; + $_etype898 = 0; + $xfer += $input->readListBegin($_etype898, $_size895); + for ($_i899 = 0; $_i899 < $_size895; ++$_i899) { - $elem865 = null; - $xfer += $input->readString($elem865); - $this->tbl_names []= $elem865; + $elem900 = null; + $xfer += $input->readString($elem900); + $this->tbl_names []= $elem900; } $xfer += $input->readListEnd(); } else { @@ -19455,9 +19643,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter866) + foreach ($this->tbl_names as $iter901) { - $xfer += $output->writeString($iter866); + $xfer += $output->writeString($iter901); } } $output->writeListEnd(); @@ -19522,15 +19710,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size867 = 0; - $_etype870 = 0; - $xfer += $input->readListBegin($_etype870, $_size867); - for ($_i871 = 0; $_i871 < $_size867; ++$_i871) + $_size902 = 0; + $_etype905 = 0; + $xfer += $input->readListBegin($_etype905, $_size902); + for ($_i906 = 0; $_i906 < $_size902; ++$_i906) { - $elem872 = null; - $elem872 = new \metastore\Table(); - $xfer += $elem872->read($input); - $this->success []= $elem872; + $elem907 = null; + $elem907 = new \metastore\Table(); + $xfer += $elem907->read($input); + $this->success []= $elem907; } $xfer += $input->readListEnd(); } else { @@ -19558,9 +19746,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter873) + foreach ($this->success as $iter908) { - $xfer += $iter873->write($output); + $xfer += $iter908->write($output); } } $output->writeListEnd(); @@ -20087,14 +20275,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size874 = 0; - $_etype877 = 0; - $xfer += $input->readListBegin($_etype877, $_size874); - for ($_i878 = 0; $_i878 < $_size874; ++$_i878) + $_size909 = 0; + $_etype912 = 0; + $xfer += $input->readListBegin($_etype912, $_size909); + for ($_i913 = 0; $_i913 < $_size909; ++$_i913) { - $elem879 = null; - $xfer += $input->readString($elem879); - $this->tbl_names []= $elem879; + $elem914 = null; + $xfer += $input->readString($elem914); + $this->tbl_names []= $elem914; } $xfer += $input->readListEnd(); } else { @@ -20127,9 +20315,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter880) + 
foreach ($this->tbl_names as $iter915) { - $xfer += $output->writeString($iter880); + $xfer += $output->writeString($iter915); } } $output->writeListEnd(); @@ -20234,18 +20422,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size881 = 0; - $_ktype882 = 0; - $_vtype883 = 0; - $xfer += $input->readMapBegin($_ktype882, $_vtype883, $_size881); - for ($_i885 = 0; $_i885 < $_size881; ++$_i885) + $_size916 = 0; + $_ktype917 = 0; + $_vtype918 = 0; + $xfer += $input->readMapBegin($_ktype917, $_vtype918, $_size916); + for ($_i920 = 0; $_i920 < $_size916; ++$_i920) { - $key886 = ''; - $val887 = new \metastore\Materialization(); - $xfer += $input->readString($key886); - $val887 = new \metastore\Materialization(); - $xfer += $val887->read($input); - $this->success[$key886] = $val887; + $key921 = ''; + $val922 = new \metastore\Materialization(); + $xfer += $input->readString($key921); + $val922 = new \metastore\Materialization(); + $xfer += $val922->read($input); + $this->success[$key921] = $val922; } $xfer += $input->readMapEnd(); } else { @@ -20297,10 +20485,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter888 => $viter889) + foreach ($this->success as $kiter923 => $viter924) { - $xfer += $output->writeString($kiter888); - $xfer += $viter889->write($output); + $xfer += $output->writeString($kiter923); + $xfer += $viter924->write($output); } } $output->writeMapEnd(); @@ -20536,14 +20724,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size890 = 0; - $_etype893 = 0; - $xfer += $input->readListBegin($_etype893, $_size890); - for ($_i894 = 0; $_i894 < $_size890; ++$_i894) + $_size925 = 0; + $_etype928 = 0; + $xfer += $input->readListBegin($_etype928, $_size925); + for ($_i929 = 0; $_i929 < $_size925; ++$_i929) { - $elem895 = null; - $xfer += $input->readString($elem895); - $this->success []= $elem895; + $elem930 = null; + $xfer += $input->readString($elem930); + $this->success []= $elem930; } $xfer += $input->readListEnd(); } else { @@ -20595,9 +20783,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter896) + foreach ($this->success as $iter931) { - $xfer += $output->writeString($iter896); + $xfer += $output->writeString($iter931); } } $output->writeListEnd(); @@ -21910,15 +22098,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size897 = 0; - $_etype900 = 0; - $xfer += $input->readListBegin($_etype900, $_size897); - for ($_i901 = 0; $_i901 < $_size897; ++$_i901) + $_size932 = 0; + $_etype935 = 0; + $xfer += $input->readListBegin($_etype935, $_size932); + for ($_i936 = 0; $_i936 < $_size932; ++$_i936) { - $elem902 = null; - $elem902 = new \metastore\Partition(); - $xfer += $elem902->read($input); - $this->new_parts []= $elem902; + $elem937 = null; + $elem937 = new \metastore\Partition(); + $xfer += $elem937->read($input); + $this->new_parts []= $elem937; } $xfer += $input->readListEnd(); } else { @@ -21946,9 +22134,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter903) + foreach ($this->new_parts 
as $iter938) { - $xfer += $iter903->write($output); + $xfer += $iter938->write($output); } } $output->writeListEnd(); @@ -22163,15 +22351,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size904 = 0; - $_etype907 = 0; - $xfer += $input->readListBegin($_etype907, $_size904); - for ($_i908 = 0; $_i908 < $_size904; ++$_i908) + $_size939 = 0; + $_etype942 = 0; + $xfer += $input->readListBegin($_etype942, $_size939); + for ($_i943 = 0; $_i943 < $_size939; ++$_i943) { - $elem909 = null; - $elem909 = new \metastore\PartitionSpec(); - $xfer += $elem909->read($input); - $this->new_parts []= $elem909; + $elem944 = null; + $elem944 = new \metastore\PartitionSpec(); + $xfer += $elem944->read($input); + $this->new_parts []= $elem944; } $xfer += $input->readListEnd(); } else { @@ -22199,9 +22387,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter910) + foreach ($this->new_parts as $iter945) { - $xfer += $iter910->write($output); + $xfer += $iter945->write($output); } } $output->writeListEnd(); @@ -22451,14 +22639,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size911 = 0; - $_etype914 = 0; - $xfer += $input->readListBegin($_etype914, $_size911); - for ($_i915 = 0; $_i915 < $_size911; ++$_i915) + $_size946 = 0; + $_etype949 = 0; + $xfer += $input->readListBegin($_etype949, $_size946); + for ($_i950 = 0; $_i950 < $_size946; ++$_i950) { - $elem916 = null; - $xfer += $input->readString($elem916); - $this->part_vals []= $elem916; + $elem951 = null; + $xfer += $input->readString($elem951); + $this->part_vals []= $elem951; } $xfer += $input->readListEnd(); } else { @@ -22496,9 +22684,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter917) + foreach ($this->part_vals as $iter952) { - $xfer += $output->writeString($iter917); + $xfer += $output->writeString($iter952); } } $output->writeListEnd(); @@ -23000,14 +23188,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size918 = 0; - $_etype921 = 0; - $xfer += $input->readListBegin($_etype921, $_size918); - for ($_i922 = 0; $_i922 < $_size918; ++$_i922) + $_size953 = 0; + $_etype956 = 0; + $xfer += $input->readListBegin($_etype956, $_size953); + for ($_i957 = 0; $_i957 < $_size953; ++$_i957) { - $elem923 = null; - $xfer += $input->readString($elem923); - $this->part_vals []= $elem923; + $elem958 = null; + $xfer += $input->readString($elem958); + $this->part_vals []= $elem958; } $xfer += $input->readListEnd(); } else { @@ -23053,9 +23241,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter924) + foreach ($this->part_vals as $iter959) { - $xfer += $output->writeString($iter924); + $xfer += $output->writeString($iter959); } } $output->writeListEnd(); @@ -23909,14 +24097,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size925 = 0; - $_etype928 = 0; - $xfer += $input->readListBegin($_etype928, $_size925); - for ($_i929 = 0; $_i929 < $_size925; ++$_i929) + $_size960 = 0; + $_etype963 = 0; + $xfer += 
$input->readListBegin($_etype963, $_size960); + for ($_i964 = 0; $_i964 < $_size960; ++$_i964) { - $elem930 = null; - $xfer += $input->readString($elem930); - $this->part_vals []= $elem930; + $elem965 = null; + $xfer += $input->readString($elem965); + $this->part_vals []= $elem965; } $xfer += $input->readListEnd(); } else { @@ -23961,9 +24149,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter931) + foreach ($this->part_vals as $iter966) { - $xfer += $output->writeString($iter931); + $xfer += $output->writeString($iter966); } } $output->writeListEnd(); @@ -24216,14 +24404,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size932 = 0; - $_etype935 = 0; - $xfer += $input->readListBegin($_etype935, $_size932); - for ($_i936 = 0; $_i936 < $_size932; ++$_i936) + $_size967 = 0; + $_etype970 = 0; + $xfer += $input->readListBegin($_etype970, $_size967); + for ($_i971 = 0; $_i971 < $_size967; ++$_i971) { - $elem937 = null; - $xfer += $input->readString($elem937); - $this->part_vals []= $elem937; + $elem972 = null; + $xfer += $input->readString($elem972); + $this->part_vals []= $elem972; } $xfer += $input->readListEnd(); } else { @@ -24276,9 +24464,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter938) + foreach ($this->part_vals as $iter973) { - $xfer += $output->writeString($iter938); + $xfer += $output->writeString($iter973); } } $output->writeListEnd(); @@ -25292,14 +25480,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size939 = 0; - $_etype942 = 0; - $xfer += $input->readListBegin($_etype942, $_size939); - for ($_i943 = 0; $_i943 < $_size939; ++$_i943) + $_size974 = 0; + $_etype977 = 0; + $xfer += $input->readListBegin($_etype977, $_size974); + for ($_i978 = 0; $_i978 < $_size974; ++$_i978) { - $elem944 = null; - $xfer += $input->readString($elem944); - $this->part_vals []= $elem944; + $elem979 = null; + $xfer += $input->readString($elem979); + $this->part_vals []= $elem979; } $xfer += $input->readListEnd(); } else { @@ -25337,9 +25525,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter945) + foreach ($this->part_vals as $iter980) { - $xfer += $output->writeString($iter945); + $xfer += $output->writeString($iter980); } } $output->writeListEnd(); @@ -25581,17 +25769,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size946 = 0; - $_ktype947 = 0; - $_vtype948 = 0; - $xfer += $input->readMapBegin($_ktype947, $_vtype948, $_size946); - for ($_i950 = 0; $_i950 < $_size946; ++$_i950) + $_size981 = 0; + $_ktype982 = 0; + $_vtype983 = 0; + $xfer += $input->readMapBegin($_ktype982, $_vtype983, $_size981); + for ($_i985 = 0; $_i985 < $_size981; ++$_i985) { - $key951 = ''; - $val952 = ''; - $xfer += $input->readString($key951); - $xfer += $input->readString($val952); - $this->partitionSpecs[$key951] = $val952; + $key986 = ''; + $val987 = ''; + $xfer += $input->readString($key986); + $xfer += $input->readString($val987); + $this->partitionSpecs[$key986] = $val987; } $xfer += $input->readMapEnd(); } else { @@ -25647,10 
+25835,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter953 => $viter954) + foreach ($this->partitionSpecs as $kiter988 => $viter989) { - $xfer += $output->writeString($kiter953); - $xfer += $output->writeString($viter954); + $xfer += $output->writeString($kiter988); + $xfer += $output->writeString($viter989); } } $output->writeMapEnd(); @@ -25962,17 +26150,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size955 = 0; - $_ktype956 = 0; - $_vtype957 = 0; - $xfer += $input->readMapBegin($_ktype956, $_vtype957, $_size955); - for ($_i959 = 0; $_i959 < $_size955; ++$_i959) + $_size990 = 0; + $_ktype991 = 0; + $_vtype992 = 0; + $xfer += $input->readMapBegin($_ktype991, $_vtype992, $_size990); + for ($_i994 = 0; $_i994 < $_size990; ++$_i994) { - $key960 = ''; - $val961 = ''; - $xfer += $input->readString($key960); - $xfer += $input->readString($val961); - $this->partitionSpecs[$key960] = $val961; + $key995 = ''; + $val996 = ''; + $xfer += $input->readString($key995); + $xfer += $input->readString($val996); + $this->partitionSpecs[$key995] = $val996; } $xfer += $input->readMapEnd(); } else { @@ -26028,10 +26216,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter962 => $viter963) + foreach ($this->partitionSpecs as $kiter997 => $viter998) { - $xfer += $output->writeString($kiter962); - $xfer += $output->writeString($viter963); + $xfer += $output->writeString($kiter997); + $xfer += $output->writeString($viter998); } } $output->writeMapEnd(); @@ -26164,15 +26352,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size964 = 0; - $_etype967 = 0; - $xfer += $input->readListBegin($_etype967, $_size964); - for ($_i968 = 0; $_i968 < $_size964; ++$_i968) + $_size999 = 0; + $_etype1002 = 0; + $xfer += $input->readListBegin($_etype1002, $_size999); + for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) { - $elem969 = null; - $elem969 = new \metastore\Partition(); - $xfer += $elem969->read($input); - $this->success []= $elem969; + $elem1004 = null; + $elem1004 = new \metastore\Partition(); + $xfer += $elem1004->read($input); + $this->success []= $elem1004; } $xfer += $input->readListEnd(); } else { @@ -26232,9 +26420,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter970) + foreach ($this->success as $iter1005) { - $xfer += $iter970->write($output); + $xfer += $iter1005->write($output); } } $output->writeListEnd(); @@ -26380,14 +26568,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size971 = 0; - $_etype974 = 0; - $xfer += $input->readListBegin($_etype974, $_size971); - for ($_i975 = 0; $_i975 < $_size971; ++$_i975) + $_size1006 = 0; + $_etype1009 = 0; + $xfer += $input->readListBegin($_etype1009, $_size1006); + for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) { - $elem976 = null; - $xfer += $input->readString($elem976); - $this->part_vals []= $elem976; + $elem1011 = null; + $xfer += $input->readString($elem1011); + $this->part_vals []= $elem1011; } $xfer += $input->readListEnd(); } else { @@ 
-26404,14 +26592,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size977 = 0; - $_etype980 = 0; - $xfer += $input->readListBegin($_etype980, $_size977); - for ($_i981 = 0; $_i981 < $_size977; ++$_i981) + $_size1012 = 0; + $_etype1015 = 0; + $xfer += $input->readListBegin($_etype1015, $_size1012); + for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) { - $elem982 = null; - $xfer += $input->readString($elem982); - $this->group_names []= $elem982; + $elem1017 = null; + $xfer += $input->readString($elem1017); + $this->group_names []= $elem1017; } $xfer += $input->readListEnd(); } else { @@ -26449,9 +26637,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter983) + foreach ($this->part_vals as $iter1018) { - $xfer += $output->writeString($iter983); + $xfer += $output->writeString($iter1018); } } $output->writeListEnd(); @@ -26471,9 +26659,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter984) + foreach ($this->group_names as $iter1019) { - $xfer += $output->writeString($iter984); + $xfer += $output->writeString($iter1019); } } $output->writeListEnd(); @@ -27064,15 +27252,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size985 = 0; - $_etype988 = 0; - $xfer += $input->readListBegin($_etype988, $_size985); - for ($_i989 = 0; $_i989 < $_size985; ++$_i989) + $_size1020 = 0; + $_etype1023 = 0; + $xfer += $input->readListBegin($_etype1023, $_size1020); + for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) { - $elem990 = null; - $elem990 = new \metastore\Partition(); - $xfer += $elem990->read($input); - $this->success []= $elem990; + $elem1025 = null; + $elem1025 = new \metastore\Partition(); + $xfer += $elem1025->read($input); + $this->success []= $elem1025; } $xfer += $input->readListEnd(); } else { @@ -27116,9 +27304,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter991) + foreach ($this->success as $iter1026) { - $xfer += $iter991->write($output); + $xfer += $iter1026->write($output); } } $output->writeListEnd(); @@ -27264,14 +27452,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size992 = 0; - $_etype995 = 0; - $xfer += $input->readListBegin($_etype995, $_size992); - for ($_i996 = 0; $_i996 < $_size992; ++$_i996) + $_size1027 = 0; + $_etype1030 = 0; + $xfer += $input->readListBegin($_etype1030, $_size1027); + for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) { - $elem997 = null; - $xfer += $input->readString($elem997); - $this->group_names []= $elem997; + $elem1032 = null; + $xfer += $input->readString($elem1032); + $this->group_names []= $elem1032; } $xfer += $input->readListEnd(); } else { @@ -27319,9 +27507,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter998) + foreach ($this->group_names as $iter1033) { - $xfer += $output->writeString($iter998); + $xfer += $output->writeString($iter1033); } } $output->writeListEnd(); @@ -27410,15 +27598,15 @@ class 
ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size999 = 0; - $_etype1002 = 0; - $xfer += $input->readListBegin($_etype1002, $_size999); - for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) + $_size1034 = 0; + $_etype1037 = 0; + $xfer += $input->readListBegin($_etype1037, $_size1034); + for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) { - $elem1004 = null; - $elem1004 = new \metastore\Partition(); - $xfer += $elem1004->read($input); - $this->success []= $elem1004; + $elem1039 = null; + $elem1039 = new \metastore\Partition(); + $xfer += $elem1039->read($input); + $this->success []= $elem1039; } $xfer += $input->readListEnd(); } else { @@ -27462,9 +27650,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1005) + foreach ($this->success as $iter1040) { - $xfer += $iter1005->write($output); + $xfer += $iter1040->write($output); } } $output->writeListEnd(); @@ -27684,15 +27872,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1006 = 0; - $_etype1009 = 0; - $xfer += $input->readListBegin($_etype1009, $_size1006); - for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) + $_size1041 = 0; + $_etype1044 = 0; + $xfer += $input->readListBegin($_etype1044, $_size1041); + for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045) { - $elem1011 = null; - $elem1011 = new \metastore\PartitionSpec(); - $xfer += $elem1011->read($input); - $this->success []= $elem1011; + $elem1046 = null; + $elem1046 = new \metastore\PartitionSpec(); + $xfer += $elem1046->read($input); + $this->success []= $elem1046; } $xfer += $input->readListEnd(); } else { @@ -27736,9 +27924,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1012) + foreach ($this->success as $iter1047) { - $xfer += $iter1012->write($output); + $xfer += $iter1047->write($output); } } $output->writeListEnd(); @@ -27957,14 +28145,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1013 = 0; - $_etype1016 = 0; - $xfer += $input->readListBegin($_etype1016, $_size1013); - for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) + $_size1048 = 0; + $_etype1051 = 0; + $xfer += $input->readListBegin($_etype1051, $_size1048); + for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) { - $elem1018 = null; - $xfer += $input->readString($elem1018); - $this->success []= $elem1018; + $elem1053 = null; + $xfer += $input->readString($elem1053); + $this->success []= $elem1053; } $xfer += $input->readListEnd(); } else { @@ -28008,9 +28196,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1019) + foreach ($this->success as $iter1054) { - $xfer += $output->writeString($iter1019); + $xfer += $output->writeString($iter1054); } } $output->writeListEnd(); @@ -28341,14 +28529,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1020 = 0; - $_etype1023 = 0; - $xfer += $input->readListBegin($_etype1023, $_size1020); - for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) + $_size1055 = 0; + $_etype1058 = 0; + $xfer += $input->readListBegin($_etype1058, $_size1055); + for 
($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059) { - $elem1025 = null; - $xfer += $input->readString($elem1025); - $this->part_vals []= $elem1025; + $elem1060 = null; + $xfer += $input->readString($elem1060); + $this->part_vals []= $elem1060; } $xfer += $input->readListEnd(); } else { @@ -28393,9 +28581,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1026) + foreach ($this->part_vals as $iter1061) { - $xfer += $output->writeString($iter1026); + $xfer += $output->writeString($iter1061); } } $output->writeListEnd(); @@ -28489,15 +28677,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1027 = 0; - $_etype1030 = 0; - $xfer += $input->readListBegin($_etype1030, $_size1027); - for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) + $_size1062 = 0; + $_etype1065 = 0; + $xfer += $input->readListBegin($_etype1065, $_size1062); + for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066) { - $elem1032 = null; - $elem1032 = new \metastore\Partition(); - $xfer += $elem1032->read($input); - $this->success []= $elem1032; + $elem1067 = null; + $elem1067 = new \metastore\Partition(); + $xfer += $elem1067->read($input); + $this->success []= $elem1067; } $xfer += $input->readListEnd(); } else { @@ -28541,9 +28729,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1033) + foreach ($this->success as $iter1068) { - $xfer += $iter1033->write($output); + $xfer += $iter1068->write($output); } } $output->writeListEnd(); @@ -28690,14 +28878,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1034 = 0; - $_etype1037 = 0; - $xfer += $input->readListBegin($_etype1037, $_size1034); - for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) + $_size1069 = 0; + $_etype1072 = 0; + $xfer += $input->readListBegin($_etype1072, $_size1069); + for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073) { - $elem1039 = null; - $xfer += $input->readString($elem1039); - $this->part_vals []= $elem1039; + $elem1074 = null; + $xfer += $input->readString($elem1074); + $this->part_vals []= $elem1074; } $xfer += $input->readListEnd(); } else { @@ -28721,14 +28909,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1040 = 0; - $_etype1043 = 0; - $xfer += $input->readListBegin($_etype1043, $_size1040); - for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) + $_size1075 = 0; + $_etype1078 = 0; + $xfer += $input->readListBegin($_etype1078, $_size1075); + for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) { - $elem1045 = null; - $xfer += $input->readString($elem1045); - $this->group_names []= $elem1045; + $elem1080 = null; + $xfer += $input->readString($elem1080); + $this->group_names []= $elem1080; } $xfer += $input->readListEnd(); } else { @@ -28766,9 +28954,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1046) + foreach ($this->part_vals as $iter1081) { - $xfer += $output->writeString($iter1046); + $xfer += $output->writeString($iter1081); } } $output->writeListEnd(); @@ -28793,9 +28981,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { 
$output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1047) + foreach ($this->group_names as $iter1082) { - $xfer += $output->writeString($iter1047); + $xfer += $output->writeString($iter1082); } } $output->writeListEnd(); @@ -28884,15 +29072,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1048 = 0; - $_etype1051 = 0; - $xfer += $input->readListBegin($_etype1051, $_size1048); - for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052) + $_size1083 = 0; + $_etype1086 = 0; + $xfer += $input->readListBegin($_etype1086, $_size1083); + for ($_i1087 = 0; $_i1087 < $_size1083; ++$_i1087) { - $elem1053 = null; - $elem1053 = new \metastore\Partition(); - $xfer += $elem1053->read($input); - $this->success []= $elem1053; + $elem1088 = null; + $elem1088 = new \metastore\Partition(); + $xfer += $elem1088->read($input); + $this->success []= $elem1088; } $xfer += $input->readListEnd(); } else { @@ -28936,9 +29124,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1054) + foreach ($this->success as $iter1089) { - $xfer += $iter1054->write($output); + $xfer += $iter1089->write($output); } } $output->writeListEnd(); @@ -29059,14 +29247,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1055 = 0; - $_etype1058 = 0; - $xfer += $input->readListBegin($_etype1058, $_size1055); - for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059) + $_size1090 = 0; + $_etype1093 = 0; + $xfer += $input->readListBegin($_etype1093, $_size1090); + for ($_i1094 = 0; $_i1094 < $_size1090; ++$_i1094) { - $elem1060 = null; - $xfer += $input->readString($elem1060); - $this->part_vals []= $elem1060; + $elem1095 = null; + $xfer += $input->readString($elem1095); + $this->part_vals []= $elem1095; } $xfer += $input->readListEnd(); } else { @@ -29111,9 +29299,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1061) + foreach ($this->part_vals as $iter1096) { - $xfer += $output->writeString($iter1061); + $xfer += $output->writeString($iter1096); } } $output->writeListEnd(); @@ -29206,14 +29394,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1062 = 0; - $_etype1065 = 0; - $xfer += $input->readListBegin($_etype1065, $_size1062); - for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066) + $_size1097 = 0; + $_etype1100 = 0; + $xfer += $input->readListBegin($_etype1100, $_size1097); + for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101) { - $elem1067 = null; - $xfer += $input->readString($elem1067); - $this->success []= $elem1067; + $elem1102 = null; + $xfer += $input->readString($elem1102); + $this->success []= $elem1102; } $xfer += $input->readListEnd(); } else { @@ -29257,9 +29445,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1068) + foreach ($this->success as $iter1103) { - $xfer += $output->writeString($iter1068); + $xfer += $output->writeString($iter1103); } } $output->writeListEnd(); @@ -29502,15 +29690,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { 
$this->success = array(); - $_size1069 = 0; - $_etype1072 = 0; - $xfer += $input->readListBegin($_etype1072, $_size1069); - for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073) + $_size1104 = 0; + $_etype1107 = 0; + $xfer += $input->readListBegin($_etype1107, $_size1104); + for ($_i1108 = 0; $_i1108 < $_size1104; ++$_i1108) { - $elem1074 = null; - $elem1074 = new \metastore\Partition(); - $xfer += $elem1074->read($input); - $this->success []= $elem1074; + $elem1109 = null; + $elem1109 = new \metastore\Partition(); + $xfer += $elem1109->read($input); + $this->success []= $elem1109; } $xfer += $input->readListEnd(); } else { @@ -29554,9 +29742,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1075) + foreach ($this->success as $iter1110) { - $xfer += $iter1075->write($output); + $xfer += $iter1110->write($output); } } $output->writeListEnd(); @@ -29799,15 +29987,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1076 = 0; - $_etype1079 = 0; - $xfer += $input->readListBegin($_etype1079, $_size1076); - for ($_i1080 = 0; $_i1080 < $_size1076; ++$_i1080) + $_size1111 = 0; + $_etype1114 = 0; + $xfer += $input->readListBegin($_etype1114, $_size1111); + for ($_i1115 = 0; $_i1115 < $_size1111; ++$_i1115) { - $elem1081 = null; - $elem1081 = new \metastore\PartitionSpec(); - $xfer += $elem1081->read($input); - $this->success []= $elem1081; + $elem1116 = null; + $elem1116 = new \metastore\PartitionSpec(); + $xfer += $elem1116->read($input); + $this->success []= $elem1116; } $xfer += $input->readListEnd(); } else { @@ -29851,9 +30039,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1082) + foreach ($this->success as $iter1117) { - $xfer += $iter1082->write($output); + $xfer += $iter1117->write($output); } } $output->writeListEnd(); @@ -30419,14 +30607,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1083 = 0; - $_etype1086 = 0; - $xfer += $input->readListBegin($_etype1086, $_size1083); - for ($_i1087 = 0; $_i1087 < $_size1083; ++$_i1087) + $_size1118 = 0; + $_etype1121 = 0; + $xfer += $input->readListBegin($_etype1121, $_size1118); + for ($_i1122 = 0; $_i1122 < $_size1118; ++$_i1122) { - $elem1088 = null; - $xfer += $input->readString($elem1088); - $this->names []= $elem1088; + $elem1123 = null; + $xfer += $input->readString($elem1123); + $this->names []= $elem1123; } $xfer += $input->readListEnd(); } else { @@ -30464,9 +30652,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1089) + foreach ($this->names as $iter1124) { - $xfer += $output->writeString($iter1089); + $xfer += $output->writeString($iter1124); } } $output->writeListEnd(); @@ -30555,15 +30743,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1090 = 0; - $_etype1093 = 0; - $xfer += $input->readListBegin($_etype1093, $_size1090); - for ($_i1094 = 0; $_i1094 < $_size1090; ++$_i1094) + $_size1125 = 0; + $_etype1128 = 0; + $xfer += $input->readListBegin($_etype1128, $_size1125); + for ($_i1129 = 0; $_i1129 < $_size1125; ++$_i1129) { - $elem1095 = null; - $elem1095 
= new \metastore\Partition(); - $xfer += $elem1095->read($input); - $this->success []= $elem1095; + $elem1130 = null; + $elem1130 = new \metastore\Partition(); + $xfer += $elem1130->read($input); + $this->success []= $elem1130; } $xfer += $input->readListEnd(); } else { @@ -30607,9 +30795,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1096) + foreach ($this->success as $iter1131) { - $xfer += $iter1096->write($output); + $xfer += $iter1131->write($output); } } $output->writeListEnd(); @@ -30948,15 +31136,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1097 = 0; - $_etype1100 = 0; - $xfer += $input->readListBegin($_etype1100, $_size1097); - for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101) + $_size1132 = 0; + $_etype1135 = 0; + $xfer += $input->readListBegin($_etype1135, $_size1132); + for ($_i1136 = 0; $_i1136 < $_size1132; ++$_i1136) { - $elem1102 = null; - $elem1102 = new \metastore\Partition(); - $xfer += $elem1102->read($input); - $this->new_parts []= $elem1102; + $elem1137 = null; + $elem1137 = new \metastore\Partition(); + $xfer += $elem1137->read($input); + $this->new_parts []= $elem1137; } $xfer += $input->readListEnd(); } else { @@ -30994,9 +31182,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1103) + foreach ($this->new_parts as $iter1138) { - $xfer += $iter1103->write($output); + $xfer += $iter1138->write($output); } } $output->writeListEnd(); @@ -31211,15 +31399,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1104 = 0; - $_etype1107 = 0; - $xfer += $input->readListBegin($_etype1107, $_size1104); - for ($_i1108 = 0; $_i1108 < $_size1104; ++$_i1108) + $_size1139 = 0; + $_etype1142 = 0; + $xfer += $input->readListBegin($_etype1142, $_size1139); + for ($_i1143 = 0; $_i1143 < $_size1139; ++$_i1143) { - $elem1109 = null; - $elem1109 = new \metastore\Partition(); - $xfer += $elem1109->read($input); - $this->new_parts []= $elem1109; + $elem1144 = null; + $elem1144 = new \metastore\Partition(); + $xfer += $elem1144->read($input); + $this->new_parts []= $elem1144; } $xfer += $input->readListEnd(); } else { @@ -31265,9 +31453,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1110) + foreach ($this->new_parts as $iter1145) { - $xfer += $iter1110->write($output); + $xfer += $iter1145->write($output); } } $output->writeListEnd(); @@ -31745,14 +31933,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1111 = 0; - $_etype1114 = 0; - $xfer += $input->readListBegin($_etype1114, $_size1111); - for ($_i1115 = 0; $_i1115 < $_size1111; ++$_i1115) + $_size1146 = 0; + $_etype1149 = 0; + $xfer += $input->readListBegin($_etype1149, $_size1146); + for ($_i1150 = 0; $_i1150 < $_size1146; ++$_i1150) { - $elem1116 = null; - $xfer += $input->readString($elem1116); - $this->part_vals []= $elem1116; + $elem1151 = null; + $xfer += $input->readString($elem1151); + $this->part_vals []= $elem1151; } $xfer += $input->readListEnd(); } else { @@ -31798,9 +31986,9 @@ class 
ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1117) + foreach ($this->part_vals as $iter1152) { - $xfer += $output->writeString($iter1117); + $xfer += $output->writeString($iter1152); } } $output->writeListEnd(); @@ -31985,14 +32173,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1118 = 0; - $_etype1121 = 0; - $xfer += $input->readListBegin($_etype1121, $_size1118); - for ($_i1122 = 0; $_i1122 < $_size1118; ++$_i1122) + $_size1153 = 0; + $_etype1156 = 0; + $xfer += $input->readListBegin($_etype1156, $_size1153); + for ($_i1157 = 0; $_i1157 < $_size1153; ++$_i1157) { - $elem1123 = null; - $xfer += $input->readString($elem1123); - $this->part_vals []= $elem1123; + $elem1158 = null; + $xfer += $input->readString($elem1158); + $this->part_vals []= $elem1158; } $xfer += $input->readListEnd(); } else { @@ -32027,9 +32215,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1124) + foreach ($this->part_vals as $iter1159) { - $xfer += $output->writeString($iter1124); + $xfer += $output->writeString($iter1159); } } $output->writeListEnd(); @@ -32483,14 +32671,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1125 = 0; - $_etype1128 = 0; - $xfer += $input->readListBegin($_etype1128, $_size1125); - for ($_i1129 = 0; $_i1129 < $_size1125; ++$_i1129) + $_size1160 = 0; + $_etype1163 = 0; + $xfer += $input->readListBegin($_etype1163, $_size1160); + for ($_i1164 = 0; $_i1164 < $_size1160; ++$_i1164) { - $elem1130 = null; - $xfer += $input->readString($elem1130); - $this->success []= $elem1130; + $elem1165 = null; + $xfer += $input->readString($elem1165); + $this->success []= $elem1165; } $xfer += $input->readListEnd(); } else { @@ -32526,9 +32714,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1131) + foreach ($this->success as $iter1166) { - $xfer += $output->writeString($iter1131); + $xfer += $output->writeString($iter1166); } } $output->writeListEnd(); @@ -32688,17 +32876,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1132 = 0; - $_ktype1133 = 0; - $_vtype1134 = 0; - $xfer += $input->readMapBegin($_ktype1133, $_vtype1134, $_size1132); - for ($_i1136 = 0; $_i1136 < $_size1132; ++$_i1136) + $_size1167 = 0; + $_ktype1168 = 0; + $_vtype1169 = 0; + $xfer += $input->readMapBegin($_ktype1168, $_vtype1169, $_size1167); + for ($_i1171 = 0; $_i1171 < $_size1167; ++$_i1171) { - $key1137 = ''; - $val1138 = ''; - $xfer += $input->readString($key1137); - $xfer += $input->readString($val1138); - $this->success[$key1137] = $val1138; + $key1172 = ''; + $val1173 = ''; + $xfer += $input->readString($key1172); + $xfer += $input->readString($val1173); + $this->success[$key1172] = $val1173; } $xfer += $input->readMapEnd(); } else { @@ -32734,10 +32922,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1139 => $viter1140) + foreach ($this->success as $kiter1174 => $viter1175) { - $xfer += 
$output->writeString($kiter1139); - $xfer += $output->writeString($viter1140); + $xfer += $output->writeString($kiter1174); + $xfer += $output->writeString($viter1175); } } $output->writeMapEnd(); @@ -32857,17 +33045,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1141 = 0; - $_ktype1142 = 0; - $_vtype1143 = 0; - $xfer += $input->readMapBegin($_ktype1142, $_vtype1143, $_size1141); - for ($_i1145 = 0; $_i1145 < $_size1141; ++$_i1145) + $_size1176 = 0; + $_ktype1177 = 0; + $_vtype1178 = 0; + $xfer += $input->readMapBegin($_ktype1177, $_vtype1178, $_size1176); + for ($_i1180 = 0; $_i1180 < $_size1176; ++$_i1180) { - $key1146 = ''; - $val1147 = ''; - $xfer += $input->readString($key1146); - $xfer += $input->readString($val1147); - $this->part_vals[$key1146] = $val1147; + $key1181 = ''; + $val1182 = ''; + $xfer += $input->readString($key1181); + $xfer += $input->readString($val1182); + $this->part_vals[$key1181] = $val1182; } $xfer += $input->readMapEnd(); } else { @@ -32912,10 +33100,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1148 => $viter1149) + foreach ($this->part_vals as $kiter1183 => $viter1184) { - $xfer += $output->writeString($kiter1148); - $xfer += $output->writeString($viter1149); + $xfer += $output->writeString($kiter1183); + $xfer += $output->writeString($viter1184); } } $output->writeMapEnd(); @@ -33237,17 +33425,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1150 = 0; - $_ktype1151 = 0; - $_vtype1152 = 0; - $xfer += $input->readMapBegin($_ktype1151, $_vtype1152, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1185 = 0; + $_ktype1186 = 0; + $_vtype1187 = 0; + $xfer += $input->readMapBegin($_ktype1186, $_vtype1187, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $key1155 = ''; - $val1156 = ''; - $xfer += $input->readString($key1155); - $xfer += $input->readString($val1156); - $this->part_vals[$key1155] = $val1156; + $key1190 = ''; + $val1191 = ''; + $xfer += $input->readString($key1190); + $xfer += $input->readString($val1191); + $this->part_vals[$key1190] = $val1191; } $xfer += $input->readMapEnd(); } else { @@ -33292,10 +33480,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1157 => $viter1158) + foreach ($this->part_vals as $kiter1192 => $viter1193) { - $xfer += $output->writeString($kiter1157); - $xfer += $output->writeString($viter1158); + $xfer += $output->writeString($kiter1192); + $xfer += $output->writeString($viter1193); } } $output->writeMapEnd(); @@ -34769,15 +34957,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1159 = 0; - $_etype1162 = 0; - $xfer += $input->readListBegin($_etype1162, $_size1159); - for ($_i1163 = 0; $_i1163 < $_size1159; ++$_i1163) + $_size1194 = 0; + $_etype1197 = 0; + $xfer += $input->readListBegin($_etype1197, $_size1194); + for ($_i1198 = 0; $_i1198 < $_size1194; ++$_i1198) { - $elem1164 = null; - $elem1164 = new \metastore\Index(); - $xfer += $elem1164->read($input); - $this->success []= $elem1164; + $elem1199 = null; + $elem1199 = new \metastore\Index(); + $xfer += 
$elem1199->read($input); + $this->success []= $elem1199; } $xfer += $input->readListEnd(); } else { @@ -34821,9 +35009,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1165) + foreach ($this->success as $iter1200) { - $xfer += $iter1165->write($output); + $xfer += $iter1200->write($output); } } $output->writeListEnd(); @@ -35030,14 +35218,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1166 = 0; - $_etype1169 = 0; - $xfer += $input->readListBegin($_etype1169, $_size1166); - for ($_i1170 = 0; $_i1170 < $_size1166; ++$_i1170) + $_size1201 = 0; + $_etype1204 = 0; + $xfer += $input->readListBegin($_etype1204, $_size1201); + for ($_i1205 = 0; $_i1205 < $_size1201; ++$_i1205) { - $elem1171 = null; - $xfer += $input->readString($elem1171); - $this->success []= $elem1171; + $elem1206 = null; + $xfer += $input->readString($elem1206); + $this->success []= $elem1206; } $xfer += $input->readListEnd(); } else { @@ -35073,9 +35261,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1172) + foreach ($this->success as $iter1207) { - $xfer += $output->writeString($iter1172); + $xfer += $output->writeString($iter1207); } } $output->writeListEnd(); @@ -39389,14 +39577,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1173 = 0; - $_etype1176 = 0; - $xfer += $input->readListBegin($_etype1176, $_size1173); - for ($_i1177 = 0; $_i1177 < $_size1173; ++$_i1177) + $_size1208 = 0; + $_etype1211 = 0; + $xfer += $input->readListBegin($_etype1211, $_size1208); + for ($_i1212 = 0; $_i1212 < $_size1208; ++$_i1212) { - $elem1178 = null; - $xfer += $input->readString($elem1178); - $this->success []= $elem1178; + $elem1213 = null; + $xfer += $input->readString($elem1213); + $this->success []= $elem1213; } $xfer += $input->readListEnd(); } else { @@ -39432,9 +39620,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1179) + foreach ($this->success as $iter1214) { - $xfer += $output->writeString($iter1179); + $xfer += $output->writeString($iter1214); } } $output->writeListEnd(); @@ -40303,14 +40491,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1180 = 0; - $_etype1183 = 0; - $xfer += $input->readListBegin($_etype1183, $_size1180); - for ($_i1184 = 0; $_i1184 < $_size1180; ++$_i1184) + $_size1215 = 0; + $_etype1218 = 0; + $xfer += $input->readListBegin($_etype1218, $_size1215); + for ($_i1219 = 0; $_i1219 < $_size1215; ++$_i1219) { - $elem1185 = null; - $xfer += $input->readString($elem1185); - $this->success []= $elem1185; + $elem1220 = null; + $xfer += $input->readString($elem1220); + $this->success []= $elem1220; } $xfer += $input->readListEnd(); } else { @@ -40346,9 +40534,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1186) + foreach ($this->success as $iter1221) { - $xfer += $output->writeString($iter1186); + $xfer += $output->writeString($iter1221); } } $output->writeListEnd(); @@ -41039,15 +41227,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == 
TType::LST) { $this->success = array(); - $_size1187 = 0; - $_etype1190 = 0; - $xfer += $input->readListBegin($_etype1190, $_size1187); - for ($_i1191 = 0; $_i1191 < $_size1187; ++$_i1191) + $_size1222 = 0; + $_etype1225 = 0; + $xfer += $input->readListBegin($_etype1225, $_size1222); + for ($_i1226 = 0; $_i1226 < $_size1222; ++$_i1226) { - $elem1192 = null; - $elem1192 = new \metastore\Role(); - $xfer += $elem1192->read($input); - $this->success []= $elem1192; + $elem1227 = null; + $elem1227 = new \metastore\Role(); + $xfer += $elem1227->read($input); + $this->success []= $elem1227; } $xfer += $input->readListEnd(); } else { @@ -41083,9 +41271,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1193) + foreach ($this->success as $iter1228) { - $xfer += $iter1193->write($output); + $xfer += $iter1228->write($output); } } $output->writeListEnd(); @@ -41747,14 +41935,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1194 = 0; - $_etype1197 = 0; - $xfer += $input->readListBegin($_etype1197, $_size1194); - for ($_i1198 = 0; $_i1198 < $_size1194; ++$_i1198) + $_size1229 = 0; + $_etype1232 = 0; + $xfer += $input->readListBegin($_etype1232, $_size1229); + for ($_i1233 = 0; $_i1233 < $_size1229; ++$_i1233) { - $elem1199 = null; - $xfer += $input->readString($elem1199); - $this->group_names []= $elem1199; + $elem1234 = null; + $xfer += $input->readString($elem1234); + $this->group_names []= $elem1234; } $xfer += $input->readListEnd(); } else { @@ -41795,9 +41983,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1200) + foreach ($this->group_names as $iter1235) { - $xfer += $output->writeString($iter1200); + $xfer += $output->writeString($iter1235); } } $output->writeListEnd(); @@ -42105,15 +42293,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1201 = 0; - $_etype1204 = 0; - $xfer += $input->readListBegin($_etype1204, $_size1201); - for ($_i1205 = 0; $_i1205 < $_size1201; ++$_i1205) + $_size1236 = 0; + $_etype1239 = 0; + $xfer += $input->readListBegin($_etype1239, $_size1236); + for ($_i1240 = 0; $_i1240 < $_size1236; ++$_i1240) { - $elem1206 = null; - $elem1206 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1206->read($input); - $this->success []= $elem1206; + $elem1241 = null; + $elem1241 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1241->read($input); + $this->success []= $elem1241; } $xfer += $input->readListEnd(); } else { @@ -42149,9 +42337,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1207) + foreach ($this->success as $iter1242) { - $xfer += $iter1207->write($output); + $xfer += $iter1242->write($output); } } $output->writeListEnd(); @@ -42783,14 +42971,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1208 = 0; - $_etype1211 = 0; - $xfer += $input->readListBegin($_etype1211, $_size1208); - for ($_i1212 = 0; $_i1212 < $_size1208; ++$_i1212) + $_size1243 = 0; + $_etype1246 = 0; + $xfer += $input->readListBegin($_etype1246, $_size1243); + for ($_i1247 = 0; $_i1247 < $_size1243; ++$_i1247) { - $elem1213 = null; - $xfer += 
$input->readString($elem1213); - $this->group_names []= $elem1213; + $elem1248 = null; + $xfer += $input->readString($elem1248); + $this->group_names []= $elem1248; } $xfer += $input->readListEnd(); } else { @@ -42823,9 +43011,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1214) + foreach ($this->group_names as $iter1249) { - $xfer += $output->writeString($iter1214); + $xfer += $output->writeString($iter1249); } } $output->writeListEnd(); @@ -42901,14 +43089,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1215 = 0; - $_etype1218 = 0; - $xfer += $input->readListBegin($_etype1218, $_size1215); - for ($_i1219 = 0; $_i1219 < $_size1215; ++$_i1219) + $_size1250 = 0; + $_etype1253 = 0; + $xfer += $input->readListBegin($_etype1253, $_size1250); + for ($_i1254 = 0; $_i1254 < $_size1250; ++$_i1254) { - $elem1220 = null; - $xfer += $input->readString($elem1220); - $this->success []= $elem1220; + $elem1255 = null; + $xfer += $input->readString($elem1255); + $this->success []= $elem1255; } $xfer += $input->readListEnd(); } else { @@ -42944,9 +43132,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1221) + foreach ($this->success as $iter1256) { - $xfer += $output->writeString($iter1221); + $xfer += $output->writeString($iter1256); } } $output->writeListEnd(); @@ -44063,14 +44251,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1222 = 0; - $_etype1225 = 0; - $xfer += $input->readListBegin($_etype1225, $_size1222); - for ($_i1226 = 0; $_i1226 < $_size1222; ++$_i1226) + $_size1257 = 0; + $_etype1260 = 0; + $xfer += $input->readListBegin($_etype1260, $_size1257); + for ($_i1261 = 0; $_i1261 < $_size1257; ++$_i1261) { - $elem1227 = null; - $xfer += $input->readString($elem1227); - $this->success []= $elem1227; + $elem1262 = null; + $xfer += $input->readString($elem1262); + $this->success []= $elem1262; } $xfer += $input->readListEnd(); } else { @@ -44098,9 +44286,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1228) + foreach ($this->success as $iter1263) { - $xfer += $output->writeString($iter1228); + $xfer += $output->writeString($iter1263); } } $output->writeListEnd(); @@ -44739,14 +44927,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1229 = 0; - $_etype1232 = 0; - $xfer += $input->readListBegin($_etype1232, $_size1229); - for ($_i1233 = 0; $_i1233 < $_size1229; ++$_i1233) + $_size1264 = 0; + $_etype1267 = 0; + $xfer += $input->readListBegin($_etype1267, $_size1264); + for ($_i1268 = 0; $_i1268 < $_size1264; ++$_i1268) { - $elem1234 = null; - $xfer += $input->readString($elem1234); - $this->success []= $elem1234; + $elem1269 = null; + $xfer += $input->readString($elem1269); + $this->success []= $elem1269; } $xfer += $input->readListEnd(); } else { @@ -44774,9 +44962,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1235) + foreach ($this->success as $iter1270) { - $xfer += $output->writeString($iter1235); + $xfer += 
$output->writeString($iter1270); } } $output->writeListEnd(); @@ -45354,12 +45542,351 @@ class ThriftHiveMetastore_abort_txn_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_abort_txn_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_abort_txn_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_abort_txns_args { + static $_TSPEC; + + /** + * @var \metastore\AbortTxnsRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\AbortTxnsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_abort_txns_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\AbortTxnsRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_abort_txns_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_abort_txns_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchTxnException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchTxnException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_abort_txns_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchTxnException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_abort_txns_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_commit_txn_args { + static $_TSPEC; + + /** + * @var \metastore\CommitTxnRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\CommitTxnRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_commit_txn_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\CommitTxnRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_commit_txn_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_commit_txn_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchTxnException + */ + public $o1 = null; + /** + * @var \metastore\TxnAbortedException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchTxnException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\TxnAbortedException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_commit_txn_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchTxnException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\TxnAbortedException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + 
$xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_commit_txn_result'); if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -45367,11 +45894,11 @@ class ThriftHiveMetastore_abort_txn_result { } -class ThriftHiveMetastore_abort_txns_args { +class ThriftHiveMetastore_get_open_write_ids_args { static $_TSPEC; /** - * @var \metastore\AbortTxnsRequest + * @var \metastore\GetOpenWriteIdsRequest */ public $rqst = null; @@ -45381,7 +45908,7 @@ class ThriftHiveMetastore_abort_txns_args { 1 => array( 'var' => 'rqst', 'type' => TType::STRUCT, - 'class' => '\metastore\AbortTxnsRequest', + 'class' => '\metastore\GetOpenWriteIdsRequest', ), ); } @@ -45393,7 +45920,7 @@ class ThriftHiveMetastore_abort_txns_args { } public function getName() { - return 'ThriftHiveMetastore_abort_txns_args'; + return 'ThriftHiveMetastore_get_open_write_ids_args'; } public function read($input) @@ -45413,7 +45940,7 @@ class ThriftHiveMetastore_abort_txns_args { { case 1: if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\AbortTxnsRequest(); + $this->rqst = new \metastore\GetOpenWriteIdsRequest(); $xfer += $this->rqst->read($input); } else { $xfer += $input->skip($ftype); @@ -45431,7 +45958,7 @@ class ThriftHiveMetastore_abort_txns_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_abort_txns_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_open_write_ids_args'); if ($this->rqst !== null) { if (!is_object($this->rqst)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -45447,33 +45974,57 @@ class ThriftHiveMetastore_abort_txns_args { } -class ThriftHiveMetastore_abort_txns_result { +class ThriftHiveMetastore_get_open_write_ids_result { static $_TSPEC; /** + * @var \metastore\GetOpenWriteIdsResponse + */ + public $success = null; + /** * @var \metastore\NoSuchTxnException */ public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetOpenWriteIdsResponse', + ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, 'class' => '\metastore\NoSuchTxnException', ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), ); } if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } } } public function getName() { - return 'ThriftHiveMetastore_abort_txns_result'; + return 'ThriftHiveMetastore_get_open_write_ids_result'; } public function read($input) @@ -45491,6 +46042,14 @@ class ThriftHiveMetastore_abort_txns_result { } switch ($fid) { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetOpenWriteIdsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; 
case 1: if ($ftype == TType::STRUCT) { $this->o1 = new \metastore\NoSuchTxnException(); @@ -45499,6 +46058,14 @@ class ThriftHiveMetastore_abort_txns_result { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -45511,12 +46078,25 @@ class ThriftHiveMetastore_abort_txns_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_abort_txns_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_open_write_ids_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -45524,11 +46104,11 @@ class ThriftHiveMetastore_abort_txns_result { } -class ThriftHiveMetastore_commit_txn_args { +class ThriftHiveMetastore_add_transactional_table_args { static $_TSPEC; /** - * @var \metastore\CommitTxnRequest + * @var \metastore\AddTransactionalTableRequest */ public $rqst = null; @@ -45538,7 +46118,7 @@ class ThriftHiveMetastore_commit_txn_args { 1 => array( 'var' => 'rqst', 'type' => TType::STRUCT, - 'class' => '\metastore\CommitTxnRequest', + 'class' => '\metastore\AddTransactionalTableRequest', ), ); } @@ -45550,7 +46130,7 @@ class ThriftHiveMetastore_commit_txn_args { } public function getName() { - return 'ThriftHiveMetastore_commit_txn_args'; + return 'ThriftHiveMetastore_add_transactional_table_args'; } public function read($input) @@ -45570,7 +46150,7 @@ class ThriftHiveMetastore_commit_txn_args { { case 1: if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\CommitTxnRequest(); + $this->rqst = new \metastore\AddTransactionalTableRequest(); $xfer += $this->rqst->read($input); } else { $xfer += $input->skip($ftype); @@ -45588,7 +46168,7 @@ class ThriftHiveMetastore_commit_txn_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_commit_txn_args'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_transactional_table_args'); if ($this->rqst !== null) { if (!is_object($this->rqst)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -45604,10 +46184,171 @@ class ThriftHiveMetastore_commit_txn_args { } -class ThriftHiveMetastore_commit_txn_result { +class ThriftHiveMetastore_add_transactional_table_result { + static $_TSPEC; + + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 
'ThriftHiveMetastore_add_transactional_table_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_transactional_table_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_allocate_table_write_id_args { static $_TSPEC; /** + * @var \metastore\AllocateTableWriteIdRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\AllocateTableWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_allocate_table_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\AllocateTableWriteIdRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_allocate_table_write_id_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_allocate_table_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\AllocateTableWriteIdResponse + */ + public $success = null; + /** * @var \metastore\NoSuchTxnException */ public $o1 = null; @@ -45615,10 +46356,19 @@ class ThriftHiveMetastore_commit_txn_result { * @var \metastore\TxnAbortedException */ public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\AllocateTableWriteIdResponse', + ), 1 => array( 'var' => 'o1', 'type' => TType::STRUCT, @@ -45629,20 
+46379,31 @@ class ThriftHiveMetastore_commit_txn_result { 'type' => TType::STRUCT, 'class' => '\metastore\TxnAbortedException', ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), ); } if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } if (isset($vals['o2'])) { $this->o2 = $vals['o2']; } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } } } public function getName() { - return 'ThriftHiveMetastore_commit_txn_result'; + return 'ThriftHiveMetastore_allocate_table_write_id_result'; } public function read($input) @@ -45660,6 +46421,14 @@ class ThriftHiveMetastore_commit_txn_result { } switch ($fid) { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\AllocateTableWriteIdResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; case 1: if ($ftype == TType::STRUCT) { $this->o1 = new \metastore\NoSuchTxnException(); @@ -45676,6 +46445,14 @@ class ThriftHiveMetastore_commit_txn_result { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -45688,7 +46465,15 @@ class ThriftHiveMetastore_commit_txn_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_commit_txn_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_allocate_table_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); @@ -45699,6 +46484,11 @@ class ThriftHiveMetastore_commit_txn_result { $xfer += $this->o2->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -47569,14 +48359,14 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_args { case 1: if ($ftype == TType::LST) { $this->db_names = array(); - $_size1236 = 0; - $_etype1239 = 0; - $xfer += $input->readListBegin($_etype1239, $_size1236); - for ($_i1240 = 0; $_i1240 < $_size1236; ++$_i1240) + $_size1271 = 0; + $_etype1274 = 0; + $xfer += $input->readListBegin($_etype1274, $_size1271); + for ($_i1275 = 0; $_i1275 < $_size1271; ++$_i1275) { - $elem1241 = null; - $xfer += $input->readString($elem1241); - $this->db_names []= $elem1241; + $elem1276 = null; + $xfer += $input->readString($elem1276); + $this->db_names []= $elem1276; } $xfer += $input->readListEnd(); } else { @@ -47586,14 +48376,14 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_args { case 2: if ($ftype == TType::LST) { $this->table_names = array(); - $_size1242 = 0; - $_etype1245 = 0; - $xfer += $input->readListBegin($_etype1245, $_size1242); - for ($_i1246 = 0; $_i1246 < $_size1242; ++$_i1246) + $_size1277 = 0; 
+ $_etype1280 = 0; + $xfer += $input->readListBegin($_etype1280, $_size1277); + for ($_i1281 = 0; $_i1281 < $_size1277; ++$_i1281) { - $elem1247 = null; - $xfer += $input->readString($elem1247); - $this->table_names []= $elem1247; + $elem1282 = null; + $xfer += $input->readString($elem1282); + $this->table_names []= $elem1282; } $xfer += $input->readListEnd(); } else { @@ -47629,9 +48419,9 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_args { { $output->writeListBegin(TType::STRING, count($this->db_names)); { - foreach ($this->db_names as $iter1248) + foreach ($this->db_names as $iter1283) { - $xfer += $output->writeString($iter1248); + $xfer += $output->writeString($iter1283); } } $output->writeListEnd(); @@ -47646,9 +48436,9 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_args { { $output->writeListBegin(TType::STRING, count($this->table_names)); { - foreach ($this->table_names as $iter1249) + foreach ($this->table_names as $iter1284) { - $xfer += $output->writeString($iter1249); + $xfer += $output->writeString($iter1284); } } $output->writeListEnd(); @@ -47721,15 +48511,15 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1250 = 0; - $_etype1253 = 0; - $xfer += $input->readListBegin($_etype1253, $_size1250); - for ($_i1254 = 0; $_i1254 < $_size1250; ++$_i1254) + $_size1285 = 0; + $_etype1288 = 0; + $xfer += $input->readListBegin($_etype1288, $_size1285); + for ($_i1289 = 0; $_i1289 < $_size1285; ++$_i1289) { - $elem1255 = null; - $elem1255 = new \metastore\BasicTxnInfo(); - $xfer += $elem1255->read($input); - $this->success []= $elem1255; + $elem1290 = null; + $elem1290 = new \metastore\BasicTxnInfo(); + $xfer += $elem1290->read($input); + $this->success []= $elem1290; } $xfer += $input->readListEnd(); } else { @@ -47757,9 +48547,9 @@ class ThriftHiveMetastore_get_last_completed_transaction_for_tables_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1256) + foreach ($this->success as $iter1291) { - $xfer += $iter1256->write($output); + $xfer += $iter1291->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 6878ee1..85f70a9 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -14367,6 +14367,872 @@ class CommitTxnRequest { } +class GetOpenWriteIdsRequest { + static $_TSPEC; + + /** + * @var int + */ + public $currentTxnId = null; + /** + * @var string[] + */ + public $tableNames = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'currentTxnId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'tableNames', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['currentTxnId'])) { + $this->currentTxnId = $vals['currentTxnId']; + } + if (isset($vals['tableNames'])) { + $this->tableNames = $vals['tableNames']; + } + } + } + + public function getName() { + return 'GetOpenWriteIdsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += 
$input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->currentTxnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::LST) { + $this->tableNames = array(); + $_size499 = 0; + $_etype502 = 0; + $xfer += $input->readListBegin($_etype502, $_size499); + for ($_i503 = 0; $_i503 < $_size499; ++$_i503) + { + $elem504 = null; + $xfer += $input->readString($elem504); + $this->tableNames []= $elem504; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetOpenWriteIdsRequest'); + if ($this->currentTxnId !== null) { + $xfer += $output->writeFieldBegin('currentTxnId', TType::I64, 1); + $xfer += $output->writeI64($this->currentTxnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableNames !== null) { + if (!is_array($this->tableNames)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('tableNames', TType::LST, 2); + { + $output->writeListBegin(TType::STRING, count($this->tableNames)); + { + foreach ($this->tableNames as $iter505) + { + $xfer += $output->writeString($iter505); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class OpenWriteIds { + static $_TSPEC; + + /** + * @var string + */ + public $tableName = null; + /** + * @var int + */ + public $writeIdHighWaterMark = null; + /** + * @var int[] + */ + public $openWriteIds = null; + /** + * @var int + */ + public $minWriteId = null; + /** + * @var string + */ + public $abortedBits = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'writeIdHighWaterMark', + 'type' => TType::I64, + ), + 3 => array( + 'var' => 'openWriteIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 4 => array( + 'var' => 'minWriteId', + 'type' => TType::I64, + ), + 5 => array( + 'var' => 'abortedBits', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['writeIdHighWaterMark'])) { + $this->writeIdHighWaterMark = $vals['writeIdHighWaterMark']; + } + if (isset($vals['openWriteIds'])) { + $this->openWriteIds = $vals['openWriteIds']; + } + if (isset($vals['minWriteId'])) { + $this->minWriteId = $vals['minWriteId']; + } + if (isset($vals['abortedBits'])) { + $this->abortedBits = $vals['abortedBits']; + } + } + } + + public function getName() { + return 'OpenWriteIds'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + 
case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeIdHighWaterMark); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->openWriteIds = array(); + $_size506 = 0; + $_etype509 = 0; + $xfer += $input->readListBegin($_etype509, $_size506); + for ($_i510 = 0; $_i510 < $_size506; ++$_i510) + { + $elem511 = null; + $xfer += $input->readI64($elem511); + $this->openWriteIds []= $elem511; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->minWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->abortedBits); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('OpenWriteIds'); + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 1); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeIdHighWaterMark !== null) { + $xfer += $output->writeFieldBegin('writeIdHighWaterMark', TType::I64, 2); + $xfer += $output->writeI64($this->writeIdHighWaterMark); + $xfer += $output->writeFieldEnd(); + } + if ($this->openWriteIds !== null) { + if (!is_array($this->openWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('openWriteIds', TType::LST, 3); + { + $output->writeListBegin(TType::I64, count($this->openWriteIds)); + { + foreach ($this->openWriteIds as $iter512) + { + $xfer += $output->writeI64($iter512); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->minWriteId !== null) { + $xfer += $output->writeFieldBegin('minWriteId', TType::I64, 4); + $xfer += $output->writeI64($this->minWriteId); + $xfer += $output->writeFieldEnd(); + } + if ($this->abortedBits !== null) { + $xfer += $output->writeFieldBegin('abortedBits', TType::STRING, 5); + $xfer += $output->writeString($this->abortedBits); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetOpenWriteIdsResponse { + static $_TSPEC; + + /** + * @var \metastore\OpenWriteIds[] + */ + public $openWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'openWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\OpenWriteIds', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['openWriteIds'])) { + $this->openWriteIds = $vals['openWriteIds']; + } + } + } + + public function getName() { + return 'GetOpenWriteIdsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->openWriteIds = array(); + $_size513 = 0; + $_etype516 = 0; + $xfer += 
$input->readListBegin($_etype516, $_size513); + for ($_i517 = 0; $_i517 < $_size513; ++$_i517) + { + $elem518 = null; + $elem518 = new \metastore\OpenWriteIds(); + $xfer += $elem518->read($input); + $this->openWriteIds []= $elem518; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetOpenWriteIdsResponse'); + if ($this->openWriteIds !== null) { + if (!is_array($this->openWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('openWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->openWriteIds)); + { + foreach ($this->openWriteIds as $iter519) + { + $xfer += $iter519->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AddTransactionalTableRequest { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + } + } + + public function getName() { + return 'AddTransactionalTableRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AddTransactionalTableRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AllocateTableWriteIdRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $txnIds = null; + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnIds', + 'type' => 
TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 2 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnIds'])) { + $this->txnIds = $vals['txnIds']; + } + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + } + } + + public function getName() { + return 'AllocateTableWriteIdRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->txnIds = array(); + $_size520 = 0; + $_etype523 = 0; + $xfer += $input->readListBegin($_etype523, $_size520); + for ($_i524 = 0; $_i524 < $_size520; ++$_i524) + { + $elem525 = null; + $xfer += $input->readI64($elem525); + $this->txnIds []= $elem525; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AllocateTableWriteIdRequest'); + if ($this->txnIds !== null) { + if (!is_array($this->txnIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('txnIds', TType::LST, 1); + { + $output->writeListBegin(TType::I64, count($this->txnIds)); + { + foreach ($this->txnIds as $iter526) + { + $xfer += $output->writeI64($iter526); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TxnToWriteId { + static $_TSPEC; + + /** + * @var int + */ + public $txnId = null; + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnId'])) { + $this->txnId = $vals['txnId']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'TxnToWriteId'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += 
$input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->txnId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TxnToWriteId'); + if ($this->txnId !== null) { + $xfer += $output->writeFieldBegin('txnId', TType::I64, 1); + $xfer += $output->writeI64($this->txnId); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 2); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AllocateTableWriteIdResponse { + static $_TSPEC; + + /** + * @var \metastore\TxnToWriteId[] + */ + public $txnToWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'txnToWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\TxnToWriteId', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['txnToWriteIds'])) { + $this->txnToWriteIds = $vals['txnToWriteIds']; + } + } + } + + public function getName() { + return 'AllocateTableWriteIdResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->txnToWriteIds = array(); + $_size527 = 0; + $_etype530 = 0; + $xfer += $input->readListBegin($_etype530, $_size527); + for ($_i531 = 0; $_i531 < $_size527; ++$_i531) + { + $elem532 = null; + $elem532 = new \metastore\TxnToWriteId(); + $xfer += $elem532->read($input); + $this->txnToWriteIds []= $elem532; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AllocateTableWriteIdResponse'); + if ($this->txnToWriteIds !== null) { + if (!is_array($this->txnToWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('txnToWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); + { + foreach ($this->txnToWriteIds as $iter533) + { + $xfer += $iter533->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class LockComponent { static $_TSPEC; @@ -14698,15 +15564,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size499 = 0; - $_etype502 = 0; - $xfer += 
$input->readListBegin($_etype502, $_size499); - for ($_i503 = 0; $_i503 < $_size499; ++$_i503) + $_size534 = 0; + $_etype537 = 0; + $xfer += $input->readListBegin($_etype537, $_size534); + for ($_i538 = 0; $_i538 < $_size534; ++$_i538) { - $elem504 = null; - $elem504 = new \metastore\LockComponent(); - $xfer += $elem504->read($input); - $this->component []= $elem504; + $elem539 = null; + $elem539 = new \metastore\LockComponent(); + $xfer += $elem539->read($input); + $this->component []= $elem539; } $xfer += $input->readListEnd(); } else { @@ -14762,9 +15628,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter505) + foreach ($this->component as $iter540) { - $xfer += $iter505->write($output); + $xfer += $iter540->write($output); } } $output->writeListEnd(); @@ -15707,15 +16573,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size506 = 0; - $_etype509 = 0; - $xfer += $input->readListBegin($_etype509, $_size506); - for ($_i510 = 0; $_i510 < $_size506; ++$_i510) + $_size541 = 0; + $_etype544 = 0; + $xfer += $input->readListBegin($_etype544, $_size541); + for ($_i545 = 0; $_i545 < $_size541; ++$_i545) { - $elem511 = null; - $elem511 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem511->read($input); - $this->locks []= $elem511; + $elem546 = null; + $elem546 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem546->read($input); + $this->locks []= $elem546; } $xfer += $input->readListEnd(); } else { @@ -15743,9 +16609,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter512) + foreach ($this->locks as $iter547) { - $xfer += $iter512->write($output); + $xfer += $iter547->write($output); } } $output->writeListEnd(); @@ -16020,17 +16886,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size513 = 0; - $_etype516 = 0; - $xfer += $input->readSetBegin($_etype516, $_size513); - for ($_i517 = 0; $_i517 < $_size513; ++$_i517) + $_size548 = 0; + $_etype551 = 0; + $xfer += $input->readSetBegin($_etype551, $_size548); + for ($_i552 = 0; $_i552 < $_size548; ++$_i552) { - $elem518 = null; - $xfer += $input->readI64($elem518); - if (is_scalar($elem518)) { - $this->aborted[$elem518] = true; + $elem553 = null; + $xfer += $input->readI64($elem553); + if (is_scalar($elem553)) { + $this->aborted[$elem553] = true; } else { - $this->aborted []= $elem518; + $this->aborted []= $elem553; } } $xfer += $input->readSetEnd(); @@ -16041,17 +16907,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size519 = 0; - $_etype522 = 0; - $xfer += $input->readSetBegin($_etype522, $_size519); - for ($_i523 = 0; $_i523 < $_size519; ++$_i523) + $_size554 = 0; + $_etype557 = 0; + $xfer += $input->readSetBegin($_etype557, $_size554); + for ($_i558 = 0; $_i558 < $_size554; ++$_i558) { - $elem524 = null; - $xfer += $input->readI64($elem524); - if (is_scalar($elem524)) { - $this->nosuch[$elem524] = true; + $elem559 = null; + $xfer += $input->readI64($elem559); + if (is_scalar($elem559)) { + $this->nosuch[$elem559] = true; } else { - $this->nosuch []= $elem524; + $this->nosuch []= $elem559; } } $xfer += $input->readSetEnd(); @@ -16080,12 +16946,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter525 => $iter526) + foreach ($this->aborted 
as $iter560 => $iter561) { - if (is_scalar($iter526)) { - $xfer += $output->writeI64($iter525); + if (is_scalar($iter561)) { + $xfer += $output->writeI64($iter560); } else { - $xfer += $output->writeI64($iter526); + $xfer += $output->writeI64($iter561); } } } @@ -16101,12 +16967,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter527 => $iter528) + foreach ($this->nosuch as $iter562 => $iter563) { - if (is_scalar($iter528)) { - $xfer += $output->writeI64($iter527); + if (is_scalar($iter563)) { + $xfer += $output->writeI64($iter562); } else { - $xfer += $output->writeI64($iter528); + $xfer += $output->writeI64($iter563); } } } @@ -16265,17 +17131,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size529 = 0; - $_ktype530 = 0; - $_vtype531 = 0; - $xfer += $input->readMapBegin($_ktype530, $_vtype531, $_size529); - for ($_i533 = 0; $_i533 < $_size529; ++$_i533) + $_size564 = 0; + $_ktype565 = 0; + $_vtype566 = 0; + $xfer += $input->readMapBegin($_ktype565, $_vtype566, $_size564); + for ($_i568 = 0; $_i568 < $_size564; ++$_i568) { - $key534 = ''; - $val535 = ''; - $xfer += $input->readString($key534); - $xfer += $input->readString($val535); - $this->properties[$key534] = $val535; + $key569 = ''; + $val570 = ''; + $xfer += $input->readString($key569); + $xfer += $input->readString($val570); + $this->properties[$key569] = $val570; } $xfer += $input->readMapEnd(); } else { @@ -16328,10 +17194,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter536 => $viter537) + foreach ($this->properties as $kiter571 => $viter572) { - $xfer += $output->writeString($kiter536); - $xfer += $output->writeString($viter537); + $xfer += $output->writeString($kiter571); + $xfer += $output->writeString($viter572); } } $output->writeMapEnd(); @@ -16918,15 +17784,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size538 = 0; - $_etype541 = 0; - $xfer += $input->readListBegin($_etype541, $_size538); - for ($_i542 = 0; $_i542 < $_size538; ++$_i542) + $_size573 = 0; + $_etype576 = 0; + $xfer += $input->readListBegin($_etype576, $_size573); + for ($_i577 = 0; $_i577 < $_size573; ++$_i577) { - $elem543 = null; - $elem543 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem543->read($input); - $this->compacts []= $elem543; + $elem578 = null; + $elem578 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem578->read($input); + $this->compacts []= $elem578; } $xfer += $input->readListEnd(); } else { @@ -16954,9 +17820,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter544) + foreach ($this->compacts as $iter579) { - $xfer += $iter544->write($output); + $xfer += $iter579->write($output); } } $output->writeListEnd(); @@ -17085,14 +17951,14 @@ class AddDynamicPartitions { case 4: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size545 = 0; - $_etype548 = 0; - $xfer += $input->readListBegin($_etype548, $_size545); - for ($_i549 = 0; $_i549 < $_size545; ++$_i549) + $_size580 = 0; + $_etype583 = 0; + $xfer += $input->readListBegin($_etype583, $_size580); + for ($_i584 = 0; $_i584 < $_size580; ++$_i584) { - $elem550 = null; - $xfer += $input->readString($elem550); - $this->partitionnames []= $elem550; + $elem585 = null; + $xfer += 
$input->readString($elem585); + $this->partitionnames []= $elem585; } $xfer += $input->readListEnd(); } else { @@ -17142,9 +18008,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter551) + foreach ($this->partitionnames as $iter586) { - $xfer += $output->writeString($iter551); + $xfer += $output->writeString($iter586); } } $output->writeListEnd(); @@ -17444,14 +18310,14 @@ class TxnsSnapshot { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size552 = 0; - $_etype555 = 0; - $xfer += $input->readListBegin($_etype555, $_size552); - for ($_i556 = 0; $_i556 < $_size552; ++$_i556) + $_size587 = 0; + $_etype590 = 0; + $xfer += $input->readListBegin($_etype590, $_size587); + for ($_i591 = 0; $_i591 < $_size587; ++$_i591) { - $elem557 = null; - $xfer += $input->readI64($elem557); - $this->open_txns []= $elem557; + $elem592 = null; + $xfer += $input->readI64($elem592); + $this->open_txns []= $elem592; } $xfer += $input->readListEnd(); } else { @@ -17484,9 +18350,9 @@ class TxnsSnapshot { { $output->writeListBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter558) + foreach ($this->open_txns as $iter593) { - $xfer += $output->writeI64($iter558); + $xfer += $output->writeI64($iter593); } } $output->writeListEnd(); @@ -17862,15 +18728,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size559 = 0; - $_etype562 = 0; - $xfer += $input->readListBegin($_etype562, $_size559); - for ($_i563 = 0; $_i563 < $_size559; ++$_i563) + $_size594 = 0; + $_etype597 = 0; + $xfer += $input->readListBegin($_etype597, $_size594); + for ($_i598 = 0; $_i598 < $_size594; ++$_i598) { - $elem564 = null; - $elem564 = new \metastore\NotificationEvent(); - $xfer += $elem564->read($input); - $this->events []= $elem564; + $elem599 = null; + $elem599 = new \metastore\NotificationEvent(); + $xfer += $elem599->read($input); + $this->events []= $elem599; } $xfer += $input->readListEnd(); } else { @@ -17898,9 +18764,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter565) + foreach ($this->events as $iter600) { - $xfer += $iter565->write($output); + $xfer += $iter600->write($output); } } $output->writeListEnd(); @@ -18245,14 +19111,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size566 = 0; - $_etype569 = 0; - $xfer += $input->readListBegin($_etype569, $_size566); - for ($_i570 = 0; $_i570 < $_size566; ++$_i570) + $_size601 = 0; + $_etype604 = 0; + $xfer += $input->readListBegin($_etype604, $_size601); + for ($_i605 = 0; $_i605 < $_size601; ++$_i605) { - $elem571 = null; - $xfer += $input->readString($elem571); - $this->filesAdded []= $elem571; + $elem606 = null; + $xfer += $input->readString($elem606); + $this->filesAdded []= $elem606; } $xfer += $input->readListEnd(); } else { @@ -18262,14 +19128,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size572 = 0; - $_etype575 = 0; - $xfer += $input->readListBegin($_etype575, $_size572); - for ($_i576 = 0; $_i576 < $_size572; ++$_i576) + $_size607 = 0; + $_etype610 = 0; + $xfer += $input->readListBegin($_etype610, $_size607); + for ($_i611 = 0; $_i611 < $_size607; ++$_i611) { - $elem577 = null; - $xfer += $input->readString($elem577); - $this->filesAddedChecksum []= $elem577; + $elem612 = 
null; + $xfer += $input->readString($elem612); + $this->filesAddedChecksum []= $elem612; } $xfer += $input->readListEnd(); } else { @@ -18302,9 +19168,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter578) + foreach ($this->filesAdded as $iter613) { - $xfer += $output->writeString($iter578); + $xfer += $output->writeString($iter613); } } $output->writeListEnd(); @@ -18319,9 +19185,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter579) + foreach ($this->filesAddedChecksum as $iter614) { - $xfer += $output->writeString($iter579); + $xfer += $output->writeString($iter614); } } $output->writeListEnd(); @@ -18539,14 +19405,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size580 = 0; - $_etype583 = 0; - $xfer += $input->readListBegin($_etype583, $_size580); - for ($_i584 = 0; $_i584 < $_size580; ++$_i584) + $_size615 = 0; + $_etype618 = 0; + $xfer += $input->readListBegin($_etype618, $_size615); + for ($_i619 = 0; $_i619 < $_size615; ++$_i619) { - $elem585 = null; - $xfer += $input->readString($elem585); - $this->partitionVals []= $elem585; + $elem620 = null; + $xfer += $input->readString($elem620); + $this->partitionVals []= $elem620; } $xfer += $input->readListEnd(); } else { @@ -18597,9 +19463,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter586) + foreach ($this->partitionVals as $iter621) { - $xfer += $output->writeString($iter586); + $xfer += $output->writeString($iter621); } } $output->writeListEnd(); @@ -18827,18 +19693,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size587 = 0; - $_ktype588 = 0; - $_vtype589 = 0; - $xfer += $input->readMapBegin($_ktype588, $_vtype589, $_size587); - for ($_i591 = 0; $_i591 < $_size587; ++$_i591) + $_size622 = 0; + $_ktype623 = 0; + $_vtype624 = 0; + $xfer += $input->readMapBegin($_ktype623, $_vtype624, $_size622); + for ($_i626 = 0; $_i626 < $_size622; ++$_i626) { - $key592 = 0; - $val593 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key592); - $val593 = new \metastore\MetadataPpdResult(); - $xfer += $val593->read($input); - $this->metadata[$key592] = $val593; + $key627 = 0; + $val628 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key627); + $val628 = new \metastore\MetadataPpdResult(); + $xfer += $val628->read($input); + $this->metadata[$key627] = $val628; } $xfer += $input->readMapEnd(); } else { @@ -18873,10 +19739,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter594 => $viter595) + foreach ($this->metadata as $kiter629 => $viter630) { - $xfer += $output->writeI64($kiter594); - $xfer += $viter595->write($output); + $xfer += $output->writeI64($kiter629); + $xfer += $viter630->write($output); } } $output->writeMapEnd(); @@ -18978,14 +19844,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size596 = 0; - $_etype599 = 0; - $xfer += $input->readListBegin($_etype599, $_size596); - for ($_i600 = 0; $_i600 < $_size596; ++$_i600) + $_size631 = 0; + $_etype634 = 0; + $xfer += $input->readListBegin($_etype634, $_size631); + for ($_i635 = 0; $_i635 < 
$_size631; ++$_i635) { - $elem601 = null; - $xfer += $input->readI64($elem601); - $this->fileIds []= $elem601; + $elem636 = null; + $xfer += $input->readI64($elem636); + $this->fileIds []= $elem636; } $xfer += $input->readListEnd(); } else { @@ -19034,9 +19900,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter602) + foreach ($this->fileIds as $iter637) { - $xfer += $output->writeI64($iter602); + $xfer += $output->writeI64($iter637); } } $output->writeListEnd(); @@ -19130,17 +19996,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size603 = 0; - $_ktype604 = 0; - $_vtype605 = 0; - $xfer += $input->readMapBegin($_ktype604, $_vtype605, $_size603); - for ($_i607 = 0; $_i607 < $_size603; ++$_i607) + $_size638 = 0; + $_ktype639 = 0; + $_vtype640 = 0; + $xfer += $input->readMapBegin($_ktype639, $_vtype640, $_size638); + for ($_i642 = 0; $_i642 < $_size638; ++$_i642) { - $key608 = 0; - $val609 = ''; - $xfer += $input->readI64($key608); - $xfer += $input->readString($val609); - $this->metadata[$key608] = $val609; + $key643 = 0; + $val644 = ''; + $xfer += $input->readI64($key643); + $xfer += $input->readString($val644); + $this->metadata[$key643] = $val644; } $xfer += $input->readMapEnd(); } else { @@ -19175,10 +20041,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter610 => $viter611) + foreach ($this->metadata as $kiter645 => $viter646) { - $xfer += $output->writeI64($kiter610); - $xfer += $output->writeString($viter611); + $xfer += $output->writeI64($kiter645); + $xfer += $output->writeString($viter646); } } $output->writeMapEnd(); @@ -19247,14 +20113,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size612 = 0; - $_etype615 = 0; - $xfer += $input->readListBegin($_etype615, $_size612); - for ($_i616 = 0; $_i616 < $_size612; ++$_i616) + $_size647 = 0; + $_etype650 = 0; + $xfer += $input->readListBegin($_etype650, $_size647); + for ($_i651 = 0; $_i651 < $_size647; ++$_i651) { - $elem617 = null; - $xfer += $input->readI64($elem617); - $this->fileIds []= $elem617; + $elem652 = null; + $xfer += $input->readI64($elem652); + $this->fileIds []= $elem652; } $xfer += $input->readListEnd(); } else { @@ -19282,9 +20148,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter618) + foreach ($this->fileIds as $iter653) { - $xfer += $output->writeI64($iter618); + $xfer += $output->writeI64($iter653); } } $output->writeListEnd(); @@ -19424,14 +20290,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size619 = 0; - $_etype622 = 0; - $xfer += $input->readListBegin($_etype622, $_size619); - for ($_i623 = 0; $_i623 < $_size619; ++$_i623) + $_size654 = 0; + $_etype657 = 0; + $xfer += $input->readListBegin($_etype657, $_size654); + for ($_i658 = 0; $_i658 < $_size654; ++$_i658) { - $elem624 = null; - $xfer += $input->readI64($elem624); - $this->fileIds []= $elem624; + $elem659 = null; + $xfer += $input->readI64($elem659); + $this->fileIds []= $elem659; } $xfer += $input->readListEnd(); } else { @@ -19441,14 +20307,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size625 = 0; - $_etype628 = 0; - $xfer += $input->readListBegin($_etype628, 
$_size625); - for ($_i629 = 0; $_i629 < $_size625; ++$_i629) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem630 = null; - $xfer += $input->readString($elem630); - $this->metadata []= $elem630; + $elem665 = null; + $xfer += $input->readString($elem665); + $this->metadata []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -19483,9 +20349,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter631) + foreach ($this->fileIds as $iter666) { - $xfer += $output->writeI64($iter631); + $xfer += $output->writeI64($iter666); } } $output->writeListEnd(); @@ -19500,9 +20366,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter632) + foreach ($this->metadata as $iter667) { - $xfer += $output->writeString($iter632); + $xfer += $output->writeString($iter667); } } $output->writeListEnd(); @@ -19621,14 +20487,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size633 = 0; - $_etype636 = 0; - $xfer += $input->readListBegin($_etype636, $_size633); - for ($_i637 = 0; $_i637 < $_size633; ++$_i637) + $_size668 = 0; + $_etype671 = 0; + $xfer += $input->readListBegin($_etype671, $_size668); + for ($_i672 = 0; $_i672 < $_size668; ++$_i672) { - $elem638 = null; - $xfer += $input->readI64($elem638); - $this->fileIds []= $elem638; + $elem673 = null; + $xfer += $input->readI64($elem673); + $this->fileIds []= $elem673; } $xfer += $input->readListEnd(); } else { @@ -19656,9 +20522,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter639) + foreach ($this->fileIds as $iter674) { - $xfer += $output->writeI64($iter639); + $xfer += $output->writeI64($iter674); } } $output->writeListEnd(); @@ -19942,15 +20808,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size640 = 0; - $_etype643 = 0; - $xfer += $input->readListBegin($_etype643, $_size640); - for ($_i644 = 0; $_i644 < $_size640; ++$_i644) + $_size675 = 0; + $_etype678 = 0; + $xfer += $input->readListBegin($_etype678, $_size675); + for ($_i679 = 0; $_i679 < $_size675; ++$_i679) { - $elem645 = null; - $elem645 = new \metastore\Function(); - $xfer += $elem645->read($input); - $this->functions []= $elem645; + $elem680 = null; + $elem680 = new \metastore\Function(); + $xfer += $elem680->read($input); + $this->functions []= $elem680; } $xfer += $input->readListEnd(); } else { @@ -19978,9 +20844,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter646) + foreach ($this->functions as $iter681) { - $xfer += $iter646->write($output); + $xfer += $iter681->write($output); } } $output->writeListEnd(); @@ -20044,14 +20910,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size647 = 0; - $_etype650 = 0; - $xfer += $input->readListBegin($_etype650, $_size647); - for ($_i651 = 0; $_i651 < $_size647; ++$_i651) + $_size682 = 0; + $_etype685 = 0; + $xfer += $input->readListBegin($_etype685, $_size682); + for ($_i686 = 0; $_i686 < $_size682; ++$_i686) { - $elem652 = null; - $xfer += $input->readI32($elem652); - $this->values []= $elem652; + $elem687 = null; + $xfer += $input->readI32($elem687); + 
$this->values []= $elem687; } $xfer += $input->readListEnd(); } else { @@ -20079,9 +20945,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter653) + foreach ($this->values as $iter688) { - $xfer += $output->writeI32($iter653); + $xfer += $output->writeI32($iter688); } } $output->writeListEnd(); @@ -20381,14 +21247,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size654 = 0; - $_etype657 = 0; - $xfer += $input->readListBegin($_etype657, $_size654); - for ($_i658 = 0; $_i658 < $_size654; ++$_i658) + $_size689 = 0; + $_etype692 = 0; + $xfer += $input->readListBegin($_etype692, $_size689); + for ($_i693 = 0; $_i693 < $_size689; ++$_i693) { - $elem659 = null; - $xfer += $input->readString($elem659); - $this->tblNames []= $elem659; + $elem694 = null; + $xfer += $input->readString($elem694); + $this->tblNames []= $elem694; } $xfer += $input->readListEnd(); } else { @@ -20429,9 +21295,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter660) + foreach ($this->tblNames as $iter695) { - $xfer += $output->writeString($iter660); + $xfer += $output->writeString($iter695); } } $output->writeListEnd(); @@ -20504,15 +21370,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size661 = 0; - $_etype664 = 0; - $xfer += $input->readListBegin($_etype664, $_size661); - for ($_i665 = 0; $_i665 < $_size661; ++$_i665) + $_size696 = 0; + $_etype699 = 0; + $xfer += $input->readListBegin($_etype699, $_size696); + for ($_i700 = 0; $_i700 < $_size696; ++$_i700) { - $elem666 = null; - $elem666 = new \metastore\Table(); - $xfer += $elem666->read($input); - $this->tables []= $elem666; + $elem701 = null; + $elem701 = new \metastore\Table(); + $xfer += $elem701->read($input); + $this->tables []= $elem701; } $xfer += $input->readListEnd(); } else { @@ -20540,9 +21406,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter667) + foreach ($this->tables as $iter702) { - $xfer += $iter667->write($output); + $xfer += $iter702->write($output); } } $output->writeListEnd(); @@ -20929,17 +21795,17 @@ class Materialization { case 2: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size668 = 0; - $_etype671 = 0; - $xfer += $input->readSetBegin($_etype671, $_size668); - for ($_i672 = 0; $_i672 < $_size668; ++$_i672) + $_size703 = 0; + $_etype706 = 0; + $xfer += $input->readSetBegin($_etype706, $_size703); + for ($_i707 = 0; $_i707 < $_size703; ++$_i707) { - $elem673 = null; - $xfer += $input->readString($elem673); - if (is_scalar($elem673)) { - $this->tablesUsed[$elem673] = true; + $elem708 = null; + $xfer += $input->readString($elem708); + if (is_scalar($elem708)) { + $this->tablesUsed[$elem708] = true; } else { - $this->tablesUsed []= $elem673; + $this->tablesUsed []= $elem708; } } $xfer += $input->readSetEnd(); @@ -20983,12 +21849,12 @@ class Materialization { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter674 => $iter675) + foreach ($this->tablesUsed as $iter709 => $iter710) { - if (is_scalar($iter675)) { - $xfer += $output->writeString($iter674); + if (is_scalar($iter710)) { + $xfer += $output->writeString($iter709); } else { - $xfer += $output->writeString($iter675); + $xfer += $output->writeString($iter710); } } } @@ -22250,15 +23116,15 @@ class 
WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size676 = 0; - $_etype679 = 0; - $xfer += $input->readListBegin($_etype679, $_size676); - for ($_i680 = 0; $_i680 < $_size676; ++$_i680) + $_size711 = 0; + $_etype714 = 0; + $xfer += $input->readListBegin($_etype714, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $elem681 = null; - $elem681 = new \metastore\WMPool(); - $xfer += $elem681->read($input); - $this->pools []= $elem681; + $elem716 = null; + $elem716 = new \metastore\WMPool(); + $xfer += $elem716->read($input); + $this->pools []= $elem716; } $xfer += $input->readListEnd(); } else { @@ -22268,15 +23134,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size682 = 0; - $_etype685 = 0; - $xfer += $input->readListBegin($_etype685, $_size682); - for ($_i686 = 0; $_i686 < $_size682; ++$_i686) + $_size717 = 0; + $_etype720 = 0; + $xfer += $input->readListBegin($_etype720, $_size717); + for ($_i721 = 0; $_i721 < $_size717; ++$_i721) { - $elem687 = null; - $elem687 = new \metastore\WMMapping(); - $xfer += $elem687->read($input); - $this->mappings []= $elem687; + $elem722 = null; + $elem722 = new \metastore\WMMapping(); + $xfer += $elem722->read($input); + $this->mappings []= $elem722; } $xfer += $input->readListEnd(); } else { @@ -22286,15 +23152,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size688 = 0; - $_etype691 = 0; - $xfer += $input->readListBegin($_etype691, $_size688); - for ($_i692 = 0; $_i692 < $_size688; ++$_i692) + $_size723 = 0; + $_etype726 = 0; + $xfer += $input->readListBegin($_etype726, $_size723); + for ($_i727 = 0; $_i727 < $_size723; ++$_i727) { - $elem693 = null; - $elem693 = new \metastore\WMTrigger(); - $xfer += $elem693->read($input); - $this->triggers []= $elem693; + $elem728 = null; + $elem728 = new \metastore\WMTrigger(); + $xfer += $elem728->read($input); + $this->triggers []= $elem728; } $xfer += $input->readListEnd(); } else { @@ -22304,15 +23170,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size694 = 0; - $_etype697 = 0; - $xfer += $input->readListBegin($_etype697, $_size694); - for ($_i698 = 0; $_i698 < $_size694; ++$_i698) + $_size729 = 0; + $_etype732 = 0; + $xfer += $input->readListBegin($_etype732, $_size729); + for ($_i733 = 0; $_i733 < $_size729; ++$_i733) { - $elem699 = null; - $elem699 = new \metastore\WMPoolTrigger(); - $xfer += $elem699->read($input); - $this->poolTriggers []= $elem699; + $elem734 = null; + $elem734 = new \metastore\WMPoolTrigger(); + $xfer += $elem734->read($input); + $this->poolTriggers []= $elem734; } $xfer += $input->readListEnd(); } else { @@ -22348,9 +23214,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter700) + foreach ($this->pools as $iter735) { - $xfer += $iter700->write($output); + $xfer += $iter735->write($output); } } $output->writeListEnd(); @@ -22365,9 +23231,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter701) + foreach ($this->mappings as $iter736) { - $xfer += $iter701->write($output); + $xfer += $iter736->write($output); } } $output->writeListEnd(); @@ -22382,9 +23248,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter702) + foreach ($this->triggers as 
$iter737) { - $xfer += $iter702->write($output); + $xfer += $iter737->write($output); } } $output->writeListEnd(); @@ -22399,9 +23265,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter703) + foreach ($this->poolTriggers as $iter738) { - $xfer += $iter703->write($output); + $xfer += $iter738->write($output); } } $output->writeListEnd(); @@ -22954,15 +23820,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size704 = 0; - $_etype707 = 0; - $xfer += $input->readListBegin($_etype707, $_size704); - for ($_i708 = 0; $_i708 < $_size704; ++$_i708) + $_size739 = 0; + $_etype742 = 0; + $xfer += $input->readListBegin($_etype742, $_size739); + for ($_i743 = 0; $_i743 < $_size739; ++$_i743) { - $elem709 = null; - $elem709 = new \metastore\WMResourcePlan(); - $xfer += $elem709->read($input); - $this->resourcePlans []= $elem709; + $elem744 = null; + $elem744 = new \metastore\WMResourcePlan(); + $xfer += $elem744->read($input); + $this->resourcePlans []= $elem744; } $xfer += $input->readListEnd(); } else { @@ -22990,9 +23856,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter710) + foreach ($this->resourcePlans as $iter745) { - $xfer += $iter710->write($output); + $xfer += $iter745->write($output); } } $output->writeListEnd(); @@ -23398,14 +24264,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size711 = 0; - $_etype714 = 0; - $xfer += $input->readListBegin($_etype714, $_size711); - for ($_i715 = 0; $_i715 < $_size711; ++$_i715) + $_size746 = 0; + $_etype749 = 0; + $xfer += $input->readListBegin($_etype749, $_size746); + for ($_i750 = 0; $_i750 < $_size746; ++$_i750) { - $elem716 = null; - $xfer += $input->readString($elem716); - $this->errors []= $elem716; + $elem751 = null; + $xfer += $input->readString($elem751); + $this->errors []= $elem751; } $xfer += $input->readListEnd(); } else { @@ -23415,14 +24281,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size717 = 0; - $_etype720 = 0; - $xfer += $input->readListBegin($_etype720, $_size717); - for ($_i721 = 0; $_i721 < $_size717; ++$_i721) + $_size752 = 0; + $_etype755 = 0; + $xfer += $input->readListBegin($_etype755, $_size752); + for ($_i756 = 0; $_i756 < $_size752; ++$_i756) { - $elem722 = null; - $xfer += $input->readString($elem722); - $this->warnings []= $elem722; + $elem757 = null; + $xfer += $input->readString($elem757); + $this->warnings []= $elem757; } $xfer += $input->readListEnd(); } else { @@ -23450,9 +24316,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter723) + foreach ($this->errors as $iter758) { - $xfer += $output->writeString($iter723); + $xfer += $output->writeString($iter758); } } $output->writeListEnd(); @@ -23467,9 +24333,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter724) + foreach ($this->warnings as $iter759) { - $xfer += $output->writeString($iter724); + $xfer += $output->writeString($iter759); } } $output->writeListEnd(); @@ -24142,15 +25008,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - 
$_size725 = 0; - $_etype728 = 0; - $xfer += $input->readListBegin($_etype728, $_size725); - for ($_i729 = 0; $_i729 < $_size725; ++$_i729) + $_size760 = 0; + $_etype763 = 0; + $xfer += $input->readListBegin($_etype763, $_size760); + for ($_i764 = 0; $_i764 < $_size760; ++$_i764) { - $elem730 = null; - $elem730 = new \metastore\WMTrigger(); - $xfer += $elem730->read($input); - $this->triggers []= $elem730; + $elem765 = null; + $elem765 = new \metastore\WMTrigger(); + $xfer += $elem765->read($input); + $this->triggers []= $elem765; } $xfer += $input->readListEnd(); } else { @@ -24178,9 +25044,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter731) + foreach ($this->triggers as $iter766) { - $xfer += $iter731->write($output); + $xfer += $iter766->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index a8fcea6..9adc906 100755 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -166,6 +166,9 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void abort_txn(AbortTxnRequest rqst)') print(' void abort_txns(AbortTxnsRequest rqst)') print(' void commit_txn(CommitTxnRequest rqst)') + print(' GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst)') + print(' void add_transactional_table(AddTransactionalTableRequest rqst)') + print(' AllocateTableWriteIdResponse allocate_table_write_id(AllocateTableWriteIdRequest rqst)') print(' LockResponse lock(LockRequest rqst)') print(' LockResponse check_lock(CheckLockRequest rqst)') print(' void unlock(UnlockRequest rqst)') @@ -1128,6 +1131,24 @@ elif cmd == 'commit_txn': sys.exit(1) pp.pprint(client.commit_txn(eval(args[0]),)) +elif cmd == 'get_open_write_ids': + if len(args) != 1: + print('get_open_write_ids requires 1 args') + sys.exit(1) + pp.pprint(client.get_open_write_ids(eval(args[0]),)) + +elif cmd == 'add_transactional_table': + if len(args) != 1: + print('add_transactional_table requires 1 args') + sys.exit(1) + pp.pprint(client.add_transactional_table(eval(args[0]),)) + +elif cmd == 'allocate_table_write_id': + if len(args) != 1: + print('allocate_table_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.allocate_table_write_id(eval(args[0]),)) + elif cmd == 'lock': if len(args) != 1: print('lock requires 1 args') diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 988c01a..be5e7c5 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1166,6 +1166,27 @@ def commit_txn(self, rqst): """ pass + def get_open_write_ids(self, rqst): + """ + Parameters: + - rqst + """ + pass + + def add_transactional_table(self, rqst): + """ + Parameters: + - rqst + """ + pass + + def allocate_table_write_id(self, rqst): + """ + Parameters: + - rqst + """ + pass + def lock(self, rqst): """ Parameters: @@ -6666,6 +6687,109 @@ def recv_commit_txn(self): raise result.o2 return + def get_open_write_ids(self, rqst): + """ + Parameters: + - rqst + """ + 
self.send_get_open_write_ids(rqst) + return self.recv_get_open_write_ids() + + def send_get_open_write_ids(self, rqst): + self._oprot.writeMessageBegin('get_open_write_ids', TMessageType.CALL, self._seqid) + args = get_open_write_ids_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_open_write_ids(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_open_write_ids_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_open_write_ids failed: unknown result") + + def add_transactional_table(self, rqst): + """ + Parameters: + - rqst + """ + self.send_add_transactional_table(rqst) + self.recv_add_transactional_table() + + def send_add_transactional_table(self, rqst): + self._oprot.writeMessageBegin('add_transactional_table', TMessageType.CALL, self._seqid) + args = add_transactional_table_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_transactional_table(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_transactional_table_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def allocate_table_write_id(self, rqst): + """ + Parameters: + - rqst + """ + self.send_allocate_table_write_id(rqst) + return self.recv_allocate_table_write_id() + + def send_allocate_table_write_id(self, rqst): + self._oprot.writeMessageBegin('allocate_table_write_id', TMessageType.CALL, self._seqid) + args = allocate_table_write_id_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_allocate_table_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = allocate_table_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "allocate_table_write_id failed: unknown result") + def lock(self, rqst): """ Parameters: @@ -8192,6 +8316,9 @@ def __init__(self, handler): self._processMap["abort_txn"] = Processor.process_abort_txn self._processMap["abort_txns"] = Processor.process_abort_txns self._processMap["commit_txn"] = Processor.process_commit_txn + self._processMap["get_open_write_ids"] = Processor.process_get_open_write_ids + self._processMap["add_transactional_table"] = Processor.process_add_transactional_table + self._processMap["allocate_table_write_id"] = Processor.process_allocate_table_write_id self._processMap["lock"] = Processor.process_lock self._processMap["check_lock"] = Processor.process_check_lock self._processMap["unlock"] = Processor.process_unlock @@ 
-11784,6 +11911,81 @@ def process_commit_txn(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_open_write_ids(self, seqid, iprot, oprot): + args = get_open_write_ids_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_open_write_ids_result() + try: + result.success = self._handler.get_open_write_ids(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_open_write_ids", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_transactional_table(self, seqid, iprot, oprot): + args = add_transactional_table_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_transactional_table_result() + try: + self._handler.add_transactional_table(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("add_transactional_table", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_allocate_table_write_id(self, seqid, iprot, oprot): + args = allocate_table_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = allocate_table_write_id_result() + try: + result.success = self._handler.allocate_table_write_id(args.rqst) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("allocate_table_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_lock(self, seqid, iprot, oprot): args = lock_args() args.read(iprot) @@ -13638,10 +13840,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype732, _size729) = iprot.readListBegin() - for _i733 in xrange(_size729): - _elem734 = iprot.readString() - self.success.append(_elem734) + (_etype767, _size764) = iprot.readListBegin() + for _i768 in xrange(_size764): + _elem769 = iprot.readString() + self.success.append(_elem769) iprot.readListEnd() else: iprot.skip(ftype) @@ -13664,8 +13866,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter735 in self.success: - oprot.writeString(iter735) + for iter770 in self.success: + oprot.writeString(iter770) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ 
-13770,10 +13972,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype739, _size736) = iprot.readListBegin() - for _i740 in xrange(_size736): - _elem741 = iprot.readString() - self.success.append(_elem741) + (_etype774, _size771) = iprot.readListBegin() + for _i775 in xrange(_size771): + _elem776 = iprot.readString() + self.success.append(_elem776) iprot.readListEnd() else: iprot.skip(ftype) @@ -13796,8 +13998,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter742 in self.success: - oprot.writeString(iter742) + for iter777 in self.success: + oprot.writeString(iter777) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14567,12 +14769,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype744, _vtype745, _size743 ) = iprot.readMapBegin() - for _i747 in xrange(_size743): - _key748 = iprot.readString() - _val749 = Type() - _val749.read(iprot) - self.success[_key748] = _val749 + (_ktype779, _vtype780, _size778 ) = iprot.readMapBegin() + for _i782 in xrange(_size778): + _key783 = iprot.readString() + _val784 = Type() + _val784.read(iprot) + self.success[_key783] = _val784 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14595,9 +14797,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter750,viter751 in self.success.items(): - oprot.writeString(kiter750) - viter751.write(oprot) + for kiter785,viter786 in self.success.items(): + oprot.writeString(kiter785) + viter786.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -14740,11 +14942,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype755, _size752) = iprot.readListBegin() - for _i756 in xrange(_size752): - _elem757 = FieldSchema() - _elem757.read(iprot) - self.success.append(_elem757) + (_etype790, _size787) = iprot.readListBegin() + for _i791 in xrange(_size787): + _elem792 = FieldSchema() + _elem792.read(iprot) + self.success.append(_elem792) iprot.readListEnd() else: iprot.skip(ftype) @@ -14779,8 +14981,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter758 in self.success: - iter758.write(oprot) + for iter793 in self.success: + iter793.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14947,11 +15149,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype762, _size759) = iprot.readListBegin() - for _i763 in xrange(_size759): - _elem764 = FieldSchema() - _elem764.read(iprot) - self.success.append(_elem764) + (_etype797, _size794) = iprot.readListBegin() + for _i798 in xrange(_size794): + _elem799 = FieldSchema() + _elem799.read(iprot) + self.success.append(_elem799) iprot.readListEnd() else: iprot.skip(ftype) @@ -14986,8 +15188,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter765 in self.success: - iter765.write(oprot) + for iter800 in self.success: + iter800.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15140,11 +15342,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: 
self.success = [] - (_etype769, _size766) = iprot.readListBegin() - for _i770 in xrange(_size766): - _elem771 = FieldSchema() - _elem771.read(iprot) - self.success.append(_elem771) + (_etype804, _size801) = iprot.readListBegin() + for _i805 in xrange(_size801): + _elem806 = FieldSchema() + _elem806.read(iprot) + self.success.append(_elem806) iprot.readListEnd() else: iprot.skip(ftype) @@ -15179,8 +15381,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter772 in self.success: - iter772.write(oprot) + for iter807 in self.success: + iter807.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15347,11 +15549,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype776, _size773) = iprot.readListBegin() - for _i777 in xrange(_size773): - _elem778 = FieldSchema() - _elem778.read(iprot) - self.success.append(_elem778) + (_etype811, _size808) = iprot.readListBegin() + for _i812 in xrange(_size808): + _elem813 = FieldSchema() + _elem813.read(iprot) + self.success.append(_elem813) iprot.readListEnd() else: iprot.skip(ftype) @@ -15386,8 +15588,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter779 in self.success: - iter779.write(oprot) + for iter814 in self.success: + iter814.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15834,44 +16036,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype783, _size780) = iprot.readListBegin() - for _i784 in xrange(_size780): - _elem785 = SQLPrimaryKey() - _elem785.read(iprot) - self.primaryKeys.append(_elem785) + (_etype818, _size815) = iprot.readListBegin() + for _i819 in xrange(_size815): + _elem820 = SQLPrimaryKey() + _elem820.read(iprot) + self.primaryKeys.append(_elem820) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype789, _size786) = iprot.readListBegin() - for _i790 in xrange(_size786): - _elem791 = SQLForeignKey() - _elem791.read(iprot) - self.foreignKeys.append(_elem791) + (_etype824, _size821) = iprot.readListBegin() + for _i825 in xrange(_size821): + _elem826 = SQLForeignKey() + _elem826.read(iprot) + self.foreignKeys.append(_elem826) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype795, _size792) = iprot.readListBegin() - for _i796 in xrange(_size792): - _elem797 = SQLUniqueConstraint() - _elem797.read(iprot) - self.uniqueConstraints.append(_elem797) + (_etype830, _size827) = iprot.readListBegin() + for _i831 in xrange(_size827): + _elem832 = SQLUniqueConstraint() + _elem832.read(iprot) + self.uniqueConstraints.append(_elem832) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype801, _size798) = iprot.readListBegin() - for _i802 in xrange(_size798): - _elem803 = SQLNotNullConstraint() - _elem803.read(iprot) - self.notNullConstraints.append(_elem803) + (_etype836, _size833) = iprot.readListBegin() + for _i837 in xrange(_size833): + _elem838 = SQLNotNullConstraint() + _elem838.read(iprot) + self.notNullConstraints.append(_elem838) iprot.readListEnd() else: iprot.skip(ftype) @@ -15892,29 +16094,29 @@ def write(self, oprot): if self.primaryKeys is not None: 
oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter804 in self.primaryKeys: - iter804.write(oprot) + for iter839 in self.primaryKeys: + iter839.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter805 in self.foreignKeys: - iter805.write(oprot) + for iter840 in self.foreignKeys: + iter840.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter806 in self.uniqueConstraints: - iter806.write(oprot) + for iter841 in self.uniqueConstraints: + iter841.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter807 in self.notNullConstraints: - iter807.write(oprot) + for iter842 in self.notNullConstraints: + iter842.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17180,10 +17382,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype811, _size808) = iprot.readListBegin() - for _i812 in xrange(_size808): - _elem813 = iprot.readString() - self.partNames.append(_elem813) + (_etype846, _size843) = iprot.readListBegin() + for _i847 in xrange(_size843): + _elem848 = iprot.readString() + self.partNames.append(_elem848) iprot.readListEnd() else: iprot.skip(ftype) @@ -17208,8 +17410,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter814 in self.partNames: - oprot.writeString(iter814) + for iter849 in self.partNames: + oprot.writeString(iter849) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17409,10 +17611,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype818, _size815) = iprot.readListBegin() - for _i819 in xrange(_size815): - _elem820 = iprot.readString() - self.success.append(_elem820) + (_etype853, _size850) = iprot.readListBegin() + for _i854 in xrange(_size850): + _elem855 = iprot.readString() + self.success.append(_elem855) iprot.readListEnd() else: iprot.skip(ftype) @@ -17435,8 +17637,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter821 in self.success: - oprot.writeString(iter821) + for iter856 in self.success: + oprot.writeString(iter856) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17586,10 +17788,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype825, _size822) = iprot.readListBegin() - for _i826 in xrange(_size822): - _elem827 = iprot.readString() - self.success.append(_elem827) + (_etype860, _size857) = iprot.readListBegin() + for _i861 in xrange(_size857): + _elem862 = iprot.readString() + self.success.append(_elem862) iprot.readListEnd() else: iprot.skip(ftype) @@ -17612,8 +17814,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter828 in self.success: - 
oprot.writeString(iter828) + for iter863 in self.success: + oprot.writeString(iter863) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17737,10 +17939,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype832, _size829) = iprot.readListBegin() - for _i833 in xrange(_size829): - _elem834 = iprot.readString() - self.success.append(_elem834) + (_etype867, _size864) = iprot.readListBegin() + for _i868 in xrange(_size864): + _elem869 = iprot.readString() + self.success.append(_elem869) iprot.readListEnd() else: iprot.skip(ftype) @@ -17763,8 +17965,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter835 in self.success: - oprot.writeString(iter835) + for iter870 in self.success: + oprot.writeString(iter870) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17837,10 +18039,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype839, _size836) = iprot.readListBegin() - for _i840 in xrange(_size836): - _elem841 = iprot.readString() - self.tbl_types.append(_elem841) + (_etype874, _size871) = iprot.readListBegin() + for _i875 in xrange(_size871): + _elem876 = iprot.readString() + self.tbl_types.append(_elem876) iprot.readListEnd() else: iprot.skip(ftype) @@ -17865,8 +18067,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter842 in self.tbl_types: - oprot.writeString(iter842) + for iter877 in self.tbl_types: + oprot.writeString(iter877) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17922,11 +18124,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = TableMeta() - _elem848.read(iprot) - self.success.append(_elem848) + (_etype881, _size878) = iprot.readListBegin() + for _i882 in xrange(_size878): + _elem883 = TableMeta() + _elem883.read(iprot) + self.success.append(_elem883) iprot.readListEnd() else: iprot.skip(ftype) @@ -17949,8 +18151,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter849 in self.success: - iter849.write(oprot) + for iter884 in self.success: + iter884.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18074,10 +18276,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype853, _size850) = iprot.readListBegin() - for _i854 in xrange(_size850): - _elem855 = iprot.readString() - self.success.append(_elem855) + (_etype888, _size885) = iprot.readListBegin() + for _i889 in xrange(_size885): + _elem890 = iprot.readString() + self.success.append(_elem890) iprot.readListEnd() else: iprot.skip(ftype) @@ -18100,8 +18302,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter856 in self.success: - oprot.writeString(iter856) + for iter891 in self.success: + oprot.writeString(iter891) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18337,10 +18539,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype860, _size857) = 
iprot.readListBegin() - for _i861 in xrange(_size857): - _elem862 = iprot.readString() - self.tbl_names.append(_elem862) + (_etype895, _size892) = iprot.readListBegin() + for _i896 in xrange(_size892): + _elem897 = iprot.readString() + self.tbl_names.append(_elem897) iprot.readListEnd() else: iprot.skip(ftype) @@ -18361,8 +18563,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter863 in self.tbl_names: - oprot.writeString(iter863) + for iter898 in self.tbl_names: + oprot.writeString(iter898) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18414,11 +18616,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype867, _size864) = iprot.readListBegin() - for _i868 in xrange(_size864): - _elem869 = Table() - _elem869.read(iprot) - self.success.append(_elem869) + (_etype902, _size899) = iprot.readListBegin() + for _i903 in xrange(_size899): + _elem904 = Table() + _elem904.read(iprot) + self.success.append(_elem904) iprot.readListEnd() else: iprot.skip(ftype) @@ -18435,8 +18637,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter870 in self.success: - iter870.write(oprot) + for iter905 in self.success: + iter905.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18828,10 +19030,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype874, _size871) = iprot.readListBegin() - for _i875 in xrange(_size871): - _elem876 = iprot.readString() - self.tbl_names.append(_elem876) + (_etype909, _size906) = iprot.readListBegin() + for _i910 in xrange(_size906): + _elem911 = iprot.readString() + self.tbl_names.append(_elem911) iprot.readListEnd() else: iprot.skip(ftype) @@ -18852,8 +19054,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter877 in self.tbl_names: - oprot.writeString(iter877) + for iter912 in self.tbl_names: + oprot.writeString(iter912) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18914,12 +19116,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype879, _vtype880, _size878 ) = iprot.readMapBegin() - for _i882 in xrange(_size878): - _key883 = iprot.readString() - _val884 = Materialization() - _val884.read(iprot) - self.success[_key883] = _val884 + (_ktype914, _vtype915, _size913 ) = iprot.readMapBegin() + for _i917 in xrange(_size913): + _key918 = iprot.readString() + _val919 = Materialization() + _val919.read(iprot) + self.success[_key918] = _val919 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18954,9 +19156,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter885,viter886 in self.success.items(): - oprot.writeString(kiter885) - viter886.write(oprot) + for kiter920,viter921 in self.success.items(): + oprot.writeString(kiter920) + viter921.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19122,10 +19324,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype890, _size887) = iprot.readListBegin() - for _i891 in xrange(_size887): - _elem892 = 
iprot.readString() - self.success.append(_elem892) + (_etype925, _size922) = iprot.readListBegin() + for _i926 in xrange(_size922): + _elem927 = iprot.readString() + self.success.append(_elem927) iprot.readListEnd() else: iprot.skip(ftype) @@ -19160,8 +19362,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter893 in self.success: - oprot.writeString(iter893) + for iter928 in self.success: + oprot.writeString(iter928) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20131,11 +20333,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype897, _size894) = iprot.readListBegin() - for _i898 in xrange(_size894): - _elem899 = Partition() - _elem899.read(iprot) - self.new_parts.append(_elem899) + (_etype932, _size929) = iprot.readListBegin() + for _i933 in xrange(_size929): + _elem934 = Partition() + _elem934.read(iprot) + self.new_parts.append(_elem934) iprot.readListEnd() else: iprot.skip(ftype) @@ -20152,8 +20354,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter900 in self.new_parts: - iter900.write(oprot) + for iter935 in self.new_parts: + iter935.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20311,11 +20513,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype904, _size901) = iprot.readListBegin() - for _i905 in xrange(_size901): - _elem906 = PartitionSpec() - _elem906.read(iprot) - self.new_parts.append(_elem906) + (_etype939, _size936) = iprot.readListBegin() + for _i940 in xrange(_size936): + _elem941 = PartitionSpec() + _elem941.read(iprot) + self.new_parts.append(_elem941) iprot.readListEnd() else: iprot.skip(ftype) @@ -20332,8 +20534,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter907 in self.new_parts: - iter907.write(oprot) + for iter942 in self.new_parts: + iter942.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20507,10 +20709,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype911, _size908) = iprot.readListBegin() - for _i912 in xrange(_size908): - _elem913 = iprot.readString() - self.part_vals.append(_elem913) + (_etype946, _size943) = iprot.readListBegin() + for _i947 in xrange(_size943): + _elem948 = iprot.readString() + self.part_vals.append(_elem948) iprot.readListEnd() else: iprot.skip(ftype) @@ -20535,8 +20737,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter914 in self.part_vals: - oprot.writeString(iter914) + for iter949 in self.part_vals: + oprot.writeString(iter949) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20889,10 +21091,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype918, _size915) = iprot.readListBegin() - for _i919 in xrange(_size915): - _elem920 = iprot.readString() - self.part_vals.append(_elem920) + (_etype953, _size950) = iprot.readListBegin() + for _i954 in xrange(_size950): + _elem955 = iprot.readString() + self.part_vals.append(_elem955) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -20923,8 +21125,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter921 in self.part_vals: - oprot.writeString(iter921) + for iter956 in self.part_vals: + oprot.writeString(iter956) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -21519,10 +21721,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype925, _size922) = iprot.readListBegin() - for _i926 in xrange(_size922): - _elem927 = iprot.readString() - self.part_vals.append(_elem927) + (_etype960, _size957) = iprot.readListBegin() + for _i961 in xrange(_size957): + _elem962 = iprot.readString() + self.part_vals.append(_elem962) iprot.readListEnd() else: iprot.skip(ftype) @@ -21552,8 +21754,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter928 in self.part_vals: - oprot.writeString(iter928) + for iter963 in self.part_vals: + oprot.writeString(iter963) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -21726,10 +21928,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype932, _size929) = iprot.readListBegin() - for _i933 in xrange(_size929): - _elem934 = iprot.readString() - self.part_vals.append(_elem934) + (_etype967, _size964) = iprot.readListBegin() + for _i968 in xrange(_size964): + _elem969 = iprot.readString() + self.part_vals.append(_elem969) iprot.readListEnd() else: iprot.skip(ftype) @@ -21765,8 +21967,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter935 in self.part_vals: - oprot.writeString(iter935) + for iter970 in self.part_vals: + oprot.writeString(iter970) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -22503,10 +22705,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype939, _size936) = iprot.readListBegin() - for _i940 in xrange(_size936): - _elem941 = iprot.readString() - self.part_vals.append(_elem941) + (_etype974, _size971) = iprot.readListBegin() + for _i975 in xrange(_size971): + _elem976 = iprot.readString() + self.part_vals.append(_elem976) iprot.readListEnd() else: iprot.skip(ftype) @@ -22531,8 +22733,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter942 in self.part_vals: - oprot.writeString(iter942) + for iter977 in self.part_vals: + oprot.writeString(iter977) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22691,11 +22893,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype944, _vtype945, _size943 ) = iprot.readMapBegin() - for _i947 in xrange(_size943): - _key948 = iprot.readString() - _val949 = iprot.readString() - self.partitionSpecs[_key948] = _val949 + (_ktype979, _vtype980, _size978 ) = iprot.readMapBegin() + for _i982 in xrange(_size978): + _key983 = iprot.readString() + _val984 = iprot.readString() + self.partitionSpecs[_key983] = _val984 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22732,9 +22934,9 @@ def write(self, oprot): if self.partitionSpecs is not None: 
oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter950,viter951 in self.partitionSpecs.items(): - oprot.writeString(kiter950) - oprot.writeString(viter951) + for kiter985,viter986 in self.partitionSpecs.items(): + oprot.writeString(kiter985) + oprot.writeString(viter986) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -22939,11 +23141,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype953, _vtype954, _size952 ) = iprot.readMapBegin() - for _i956 in xrange(_size952): - _key957 = iprot.readString() - _val958 = iprot.readString() - self.partitionSpecs[_key957] = _val958 + (_ktype988, _vtype989, _size987 ) = iprot.readMapBegin() + for _i991 in xrange(_size987): + _key992 = iprot.readString() + _val993 = iprot.readString() + self.partitionSpecs[_key992] = _val993 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22980,9 +23182,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter959,viter960 in self.partitionSpecs.items(): - oprot.writeString(kiter959) - oprot.writeString(viter960) + for kiter994,viter995 in self.partitionSpecs.items(): + oprot.writeString(kiter994) + oprot.writeString(viter995) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -23065,11 +23267,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype964, _size961) = iprot.readListBegin() - for _i965 in xrange(_size961): - _elem966 = Partition() - _elem966.read(iprot) - self.success.append(_elem966) + (_etype999, _size996) = iprot.readListBegin() + for _i1000 in xrange(_size996): + _elem1001 = Partition() + _elem1001.read(iprot) + self.success.append(_elem1001) iprot.readListEnd() else: iprot.skip(ftype) @@ -23110,8 +23312,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter967 in self.success: - iter967.write(oprot) + for iter1002 in self.success: + iter1002.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23205,10 +23407,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype971, _size968) = iprot.readListBegin() - for _i972 in xrange(_size968): - _elem973 = iprot.readString() - self.part_vals.append(_elem973) + (_etype1006, _size1003) = iprot.readListBegin() + for _i1007 in xrange(_size1003): + _elem1008 = iprot.readString() + self.part_vals.append(_elem1008) iprot.readListEnd() else: iprot.skip(ftype) @@ -23220,10 +23422,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype977, _size974) = iprot.readListBegin() - for _i978 in xrange(_size974): - _elem979 = iprot.readString() - self.group_names.append(_elem979) + (_etype1012, _size1009) = iprot.readListBegin() + for _i1013 in xrange(_size1009): + _elem1014 = iprot.readString() + self.group_names.append(_elem1014) iprot.readListEnd() else: iprot.skip(ftype) @@ -23248,8 +23450,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter980 in self.part_vals: - oprot.writeString(iter980) + for iter1015 in self.part_vals: + oprot.writeString(iter1015) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -23259,8 +23461,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter981 in self.group_names: - oprot.writeString(iter981) + for iter1016 in self.group_names: + oprot.writeString(iter1016) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23689,11 +23891,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype985, _size982) = iprot.readListBegin() - for _i986 in xrange(_size982): - _elem987 = Partition() - _elem987.read(iprot) - self.success.append(_elem987) + (_etype1020, _size1017) = iprot.readListBegin() + for _i1021 in xrange(_size1017): + _elem1022 = Partition() + _elem1022.read(iprot) + self.success.append(_elem1022) iprot.readListEnd() else: iprot.skip(ftype) @@ -23722,8 +23924,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter988 in self.success: - iter988.write(oprot) + for iter1023 in self.success: + iter1023.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23817,10 +24019,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype992, _size989) = iprot.readListBegin() - for _i993 in xrange(_size989): - _elem994 = iprot.readString() - self.group_names.append(_elem994) + (_etype1027, _size1024) = iprot.readListBegin() + for _i1028 in xrange(_size1024): + _elem1029 = iprot.readString() + self.group_names.append(_elem1029) iprot.readListEnd() else: iprot.skip(ftype) @@ -23853,8 +24055,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter995 in self.group_names: - oprot.writeString(iter995) + for iter1030 in self.group_names: + oprot.writeString(iter1030) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23915,11 +24117,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype999, _size996) = iprot.readListBegin() - for _i1000 in xrange(_size996): - _elem1001 = Partition() - _elem1001.read(iprot) - self.success.append(_elem1001) + (_etype1034, _size1031) = iprot.readListBegin() + for _i1035 in xrange(_size1031): + _elem1036 = Partition() + _elem1036.read(iprot) + self.success.append(_elem1036) iprot.readListEnd() else: iprot.skip(ftype) @@ -23948,8 +24150,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1002 in self.success: - iter1002.write(oprot) + for iter1037 in self.success: + iter1037.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24107,11 +24309,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1006, _size1003) = iprot.readListBegin() - for _i1007 in xrange(_size1003): - _elem1008 = PartitionSpec() - _elem1008.read(iprot) - self.success.append(_elem1008) + (_etype1041, _size1038) = iprot.readListBegin() + for _i1042 in xrange(_size1038): + _elem1043 = PartitionSpec() + _elem1043.read(iprot) + self.success.append(_elem1043) iprot.readListEnd() else: iprot.skip(ftype) @@ -24140,8 +24342,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1009 in self.success: - iter1009.write(oprot) + for iter1044 in self.success: + iter1044.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24299,10 +24501,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1013, _size1010) = iprot.readListBegin() - for _i1014 in xrange(_size1010): - _elem1015 = iprot.readString() - self.success.append(_elem1015) + (_etype1048, _size1045) = iprot.readListBegin() + for _i1049 in xrange(_size1045): + _elem1050 = iprot.readString() + self.success.append(_elem1050) iprot.readListEnd() else: iprot.skip(ftype) @@ -24331,8 +24533,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1016 in self.success: - oprot.writeString(iter1016) + for iter1051 in self.success: + oprot.writeString(iter1051) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24572,10 +24774,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1020, _size1017) = iprot.readListBegin() - for _i1021 in xrange(_size1017): - _elem1022 = iprot.readString() - self.part_vals.append(_elem1022) + (_etype1055, _size1052) = iprot.readListBegin() + for _i1056 in xrange(_size1052): + _elem1057 = iprot.readString() + self.part_vals.append(_elem1057) iprot.readListEnd() else: iprot.skip(ftype) @@ -24605,8 +24807,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1023 in self.part_vals: - oprot.writeString(iter1023) + for iter1058 in self.part_vals: + oprot.writeString(iter1058) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -24670,11 +24872,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1027, _size1024) = iprot.readListBegin() - for _i1028 in xrange(_size1024): - _elem1029 = Partition() - _elem1029.read(iprot) - self.success.append(_elem1029) + (_etype1062, _size1059) = iprot.readListBegin() + for _i1063 in xrange(_size1059): + _elem1064 = Partition() + _elem1064.read(iprot) + self.success.append(_elem1064) iprot.readListEnd() else: iprot.skip(ftype) @@ -24703,8 +24905,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1030 in self.success: - iter1030.write(oprot) + for iter1065 in self.success: + iter1065.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24791,10 +24993,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1034, _size1031) = iprot.readListBegin() - for _i1035 in xrange(_size1031): - _elem1036 = iprot.readString() - self.part_vals.append(_elem1036) + (_etype1069, _size1066) = iprot.readListBegin() + for _i1070 in xrange(_size1066): + _elem1071 = iprot.readString() + self.part_vals.append(_elem1071) iprot.readListEnd() else: iprot.skip(ftype) @@ -24811,10 +25013,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1040, _size1037) = iprot.readListBegin() - for _i1041 in xrange(_size1037): - _elem1042 = iprot.readString() - self.group_names.append(_elem1042) + (_etype1075, _size1072) = 
iprot.readListBegin() + for _i1076 in xrange(_size1072): + _elem1077 = iprot.readString() + self.group_names.append(_elem1077) iprot.readListEnd() else: iprot.skip(ftype) @@ -24839,8 +25041,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1043 in self.part_vals: - oprot.writeString(iter1043) + for iter1078 in self.part_vals: + oprot.writeString(iter1078) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -24854,8 +25056,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1044 in self.group_names: - oprot.writeString(iter1044) + for iter1079 in self.group_names: + oprot.writeString(iter1079) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24917,11 +25119,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1048, _size1045) = iprot.readListBegin() - for _i1049 in xrange(_size1045): - _elem1050 = Partition() - _elem1050.read(iprot) - self.success.append(_elem1050) + (_etype1083, _size1080) = iprot.readListBegin() + for _i1084 in xrange(_size1080): + _elem1085 = Partition() + _elem1085.read(iprot) + self.success.append(_elem1085) iprot.readListEnd() else: iprot.skip(ftype) @@ -24950,8 +25152,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1051 in self.success: - iter1051.write(oprot) + for iter1086 in self.success: + iter1086.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25032,10 +25234,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1055, _size1052) = iprot.readListBegin() - for _i1056 in xrange(_size1052): - _elem1057 = iprot.readString() - self.part_vals.append(_elem1057) + (_etype1090, _size1087) = iprot.readListBegin() + for _i1091 in xrange(_size1087): + _elem1092 = iprot.readString() + self.part_vals.append(_elem1092) iprot.readListEnd() else: iprot.skip(ftype) @@ -25065,8 +25267,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1058 in self.part_vals: - oprot.writeString(iter1058) + for iter1093 in self.part_vals: + oprot.writeString(iter1093) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25130,10 +25332,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1062, _size1059) = iprot.readListBegin() - for _i1063 in xrange(_size1059): - _elem1064 = iprot.readString() - self.success.append(_elem1064) + (_etype1097, _size1094) = iprot.readListBegin() + for _i1098 in xrange(_size1094): + _elem1099 = iprot.readString() + self.success.append(_elem1099) iprot.readListEnd() else: iprot.skip(ftype) @@ -25162,8 +25364,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1065 in self.success: - oprot.writeString(iter1065) + for iter1100 in self.success: + oprot.writeString(iter1100) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25334,11 +25536,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = 
[] - (_etype1069, _size1066) = iprot.readListBegin() - for _i1070 in xrange(_size1066): - _elem1071 = Partition() - _elem1071.read(iprot) - self.success.append(_elem1071) + (_etype1104, _size1101) = iprot.readListBegin() + for _i1105 in xrange(_size1101): + _elem1106 = Partition() + _elem1106.read(iprot) + self.success.append(_elem1106) iprot.readListEnd() else: iprot.skip(ftype) @@ -25367,8 +25569,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1072 in self.success: - iter1072.write(oprot) + for iter1107 in self.success: + iter1107.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25539,11 +25741,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1076, _size1073) = iprot.readListBegin() - for _i1077 in xrange(_size1073): - _elem1078 = PartitionSpec() - _elem1078.read(iprot) - self.success.append(_elem1078) + (_etype1111, _size1108) = iprot.readListBegin() + for _i1112 in xrange(_size1108): + _elem1113 = PartitionSpec() + _elem1113.read(iprot) + self.success.append(_elem1113) iprot.readListEnd() else: iprot.skip(ftype) @@ -25572,8 +25774,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1079 in self.success: - iter1079.write(oprot) + for iter1114 in self.success: + iter1114.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25993,10 +26195,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1083, _size1080) = iprot.readListBegin() - for _i1084 in xrange(_size1080): - _elem1085 = iprot.readString() - self.names.append(_elem1085) + (_etype1118, _size1115) = iprot.readListBegin() + for _i1119 in xrange(_size1115): + _elem1120 = iprot.readString() + self.names.append(_elem1120) iprot.readListEnd() else: iprot.skip(ftype) @@ -26021,8 +26223,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1086 in self.names: - oprot.writeString(iter1086) + for iter1121 in self.names: + oprot.writeString(iter1121) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26081,11 +26283,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1090, _size1087) = iprot.readListBegin() - for _i1091 in xrange(_size1087): - _elem1092 = Partition() - _elem1092.read(iprot) - self.success.append(_elem1092) + (_etype1125, _size1122) = iprot.readListBegin() + for _i1126 in xrange(_size1122): + _elem1127 = Partition() + _elem1127.read(iprot) + self.success.append(_elem1127) iprot.readListEnd() else: iprot.skip(ftype) @@ -26114,8 +26316,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1093 in self.success: - iter1093.write(oprot) + for iter1128 in self.success: + iter1128.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26365,11 +26567,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1097, _size1094) = iprot.readListBegin() - for _i1098 in xrange(_size1094): - _elem1099 = Partition() - _elem1099.read(iprot) - self.new_parts.append(_elem1099) + (_etype1132, _size1129) = 
iprot.readListBegin() + for _i1133 in xrange(_size1129): + _elem1134 = Partition() + _elem1134.read(iprot) + self.new_parts.append(_elem1134) iprot.readListEnd() else: iprot.skip(ftype) @@ -26394,8 +26596,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1100 in self.new_parts: - iter1100.write(oprot) + for iter1135 in self.new_parts: + iter1135.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26548,11 +26750,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1104, _size1101) = iprot.readListBegin() - for _i1105 in xrange(_size1101): - _elem1106 = Partition() - _elem1106.read(iprot) - self.new_parts.append(_elem1106) + (_etype1139, _size1136) = iprot.readListBegin() + for _i1140 in xrange(_size1136): + _elem1141 = Partition() + _elem1141.read(iprot) + self.new_parts.append(_elem1141) iprot.readListEnd() else: iprot.skip(ftype) @@ -26583,8 +26785,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1107 in self.new_parts: - iter1107.write(oprot) + for iter1142 in self.new_parts: + iter1142.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -26928,10 +27130,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1111, _size1108) = iprot.readListBegin() - for _i1112 in xrange(_size1108): - _elem1113 = iprot.readString() - self.part_vals.append(_elem1113) + (_etype1146, _size1143) = iprot.readListBegin() + for _i1147 in xrange(_size1143): + _elem1148 = iprot.readString() + self.part_vals.append(_elem1148) iprot.readListEnd() else: iprot.skip(ftype) @@ -26962,8 +27164,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1114 in self.part_vals: - oprot.writeString(iter1114) + for iter1149 in self.part_vals: + oprot.writeString(iter1149) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -27105,10 +27307,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1118, _size1115) = iprot.readListBegin() - for _i1119 in xrange(_size1115): - _elem1120 = iprot.readString() - self.part_vals.append(_elem1120) + (_etype1153, _size1150) = iprot.readListBegin() + for _i1154 in xrange(_size1150): + _elem1155 = iprot.readString() + self.part_vals.append(_elem1155) iprot.readListEnd() else: iprot.skip(ftype) @@ -27130,8 +27332,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1121 in self.part_vals: - oprot.writeString(iter1121) + for iter1156 in self.part_vals: + oprot.writeString(iter1156) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -27489,10 +27691,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1125, _size1122) = iprot.readListBegin() - for _i1126 in xrange(_size1122): - _elem1127 = iprot.readString() - self.success.append(_elem1127) + (_etype1160, _size1157) = iprot.readListBegin() + for _i1161 in xrange(_size1157): + _elem1162 = iprot.readString() + self.success.append(_elem1162) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -27515,8 +27717,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1128 in self.success: - oprot.writeString(iter1128) + for iter1163 in self.success: + oprot.writeString(iter1163) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27640,11 +27842,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1130, _vtype1131, _size1129 ) = iprot.readMapBegin() - for _i1133 in xrange(_size1129): - _key1134 = iprot.readString() - _val1135 = iprot.readString() - self.success[_key1134] = _val1135 + (_ktype1165, _vtype1166, _size1164 ) = iprot.readMapBegin() + for _i1168 in xrange(_size1164): + _key1169 = iprot.readString() + _val1170 = iprot.readString() + self.success[_key1169] = _val1170 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27667,9 +27869,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1136,viter1137 in self.success.items(): - oprot.writeString(kiter1136) - oprot.writeString(viter1137) + for kiter1171,viter1172 in self.success.items(): + oprot.writeString(kiter1171) + oprot.writeString(viter1172) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27745,11 +27947,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1139, _vtype1140, _size1138 ) = iprot.readMapBegin() - for _i1142 in xrange(_size1138): - _key1143 = iprot.readString() - _val1144 = iprot.readString() - self.part_vals[_key1143] = _val1144 + (_ktype1174, _vtype1175, _size1173 ) = iprot.readMapBegin() + for _i1177 in xrange(_size1173): + _key1178 = iprot.readString() + _val1179 = iprot.readString() + self.part_vals[_key1178] = _val1179 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27779,9 +27981,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1145,viter1146 in self.part_vals.items(): - oprot.writeString(kiter1145) - oprot.writeString(viter1146) + for kiter1180,viter1181 in self.part_vals.items(): + oprot.writeString(kiter1180) + oprot.writeString(viter1181) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -27995,11 +28197,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1148, _vtype1149, _size1147 ) = iprot.readMapBegin() - for _i1151 in xrange(_size1147): - _key1152 = iprot.readString() - _val1153 = iprot.readString() - self.part_vals[_key1152] = _val1153 + (_ktype1183, _vtype1184, _size1182 ) = iprot.readMapBegin() + for _i1186 in xrange(_size1182): + _key1187 = iprot.readString() + _val1188 = iprot.readString() + self.part_vals[_key1187] = _val1188 iprot.readMapEnd() else: iprot.skip(ftype) @@ -28029,9 +28231,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1154,viter1155 in self.part_vals.items(): - oprot.writeString(kiter1154) - oprot.writeString(viter1155) + for kiter1189,viter1190 in self.part_vals.items(): + oprot.writeString(kiter1189) + oprot.writeString(viter1190) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -29086,11 +29288,11 @@ def read(self, 
iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1159, _size1156) = iprot.readListBegin() - for _i1160 in xrange(_size1156): - _elem1161 = Index() - _elem1161.read(iprot) - self.success.append(_elem1161) + (_etype1194, _size1191) = iprot.readListBegin() + for _i1195 in xrange(_size1191): + _elem1196 = Index() + _elem1196.read(iprot) + self.success.append(_elem1196) iprot.readListEnd() else: iprot.skip(ftype) @@ -29119,8 +29321,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1162 in self.success: - iter1162.write(oprot) + for iter1197 in self.success: + iter1197.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29275,10 +29477,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1166, _size1163) = iprot.readListBegin() - for _i1167 in xrange(_size1163): - _elem1168 = iprot.readString() - self.success.append(_elem1168) + (_etype1201, _size1198) = iprot.readListBegin() + for _i1202 in xrange(_size1198): + _elem1203 = iprot.readString() + self.success.append(_elem1203) iprot.readListEnd() else: iprot.skip(ftype) @@ -29301,8 +29503,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1169 in self.success: - oprot.writeString(iter1169) + for iter1204 in self.success: + oprot.writeString(iter1204) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -32486,10 +32688,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1173, _size1170) = iprot.readListBegin() - for _i1174 in xrange(_size1170): - _elem1175 = iprot.readString() - self.success.append(_elem1175) + (_etype1208, _size1205) = iprot.readListBegin() + for _i1209 in xrange(_size1205): + _elem1210 = iprot.readString() + self.success.append(_elem1210) iprot.readListEnd() else: iprot.skip(ftype) @@ -32512,8 +32714,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1176 in self.success: - oprot.writeString(iter1176) + for iter1211 in self.success: + oprot.writeString(iter1211) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33201,10 +33403,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1180, _size1177) = iprot.readListBegin() - for _i1181 in xrange(_size1177): - _elem1182 = iprot.readString() - self.success.append(_elem1182) + (_etype1215, _size1212) = iprot.readListBegin() + for _i1216 in xrange(_size1212): + _elem1217 = iprot.readString() + self.success.append(_elem1217) iprot.readListEnd() else: iprot.skip(ftype) @@ -33227,8 +33429,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1183 in self.success: - oprot.writeString(iter1183) + for iter1218 in self.success: + oprot.writeString(iter1218) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33742,11 +33944,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1187, _size1184) = iprot.readListBegin() - for _i1188 in xrange(_size1184): - _elem1189 = Role() - _elem1189.read(iprot) - self.success.append(_elem1189) + (_etype1222, _size1219) = 
iprot.readListBegin() + for _i1223 in xrange(_size1219): + _elem1224 = Role() + _elem1224.read(iprot) + self.success.append(_elem1224) iprot.readListEnd() else: iprot.skip(ftype) @@ -33769,8 +33971,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1190 in self.success: - iter1190.write(oprot) + for iter1225 in self.success: + iter1225.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34279,10 +34481,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1194, _size1191) = iprot.readListBegin() - for _i1195 in xrange(_size1191): - _elem1196 = iprot.readString() - self.group_names.append(_elem1196) + (_etype1229, _size1226) = iprot.readListBegin() + for _i1230 in xrange(_size1226): + _elem1231 = iprot.readString() + self.group_names.append(_elem1231) iprot.readListEnd() else: iprot.skip(ftype) @@ -34307,8 +34509,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1197 in self.group_names: - oprot.writeString(iter1197) + for iter1232 in self.group_names: + oprot.writeString(iter1232) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34535,11 +34737,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1201, _size1198) = iprot.readListBegin() - for _i1202 in xrange(_size1198): - _elem1203 = HiveObjectPrivilege() - _elem1203.read(iprot) - self.success.append(_elem1203) + (_etype1236, _size1233) = iprot.readListBegin() + for _i1237 in xrange(_size1233): + _elem1238 = HiveObjectPrivilege() + _elem1238.read(iprot) + self.success.append(_elem1238) iprot.readListEnd() else: iprot.skip(ftype) @@ -34562,8 +34764,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1204 in self.success: - iter1204.write(oprot) + for iter1239 in self.success: + iter1239.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35061,10 +35263,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1208, _size1205) = iprot.readListBegin() - for _i1209 in xrange(_size1205): - _elem1210 = iprot.readString() - self.group_names.append(_elem1210) + (_etype1243, _size1240) = iprot.readListBegin() + for _i1244 in xrange(_size1240): + _elem1245 = iprot.readString() + self.group_names.append(_elem1245) iprot.readListEnd() else: iprot.skip(ftype) @@ -35085,8 +35287,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1211 in self.group_names: - oprot.writeString(iter1211) + for iter1246 in self.group_names: + oprot.writeString(iter1246) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35141,10 +35343,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1215, _size1212) = iprot.readListBegin() - for _i1216 in xrange(_size1212): - _elem1217 = iprot.readString() - self.success.append(_elem1217) + (_etype1250, _size1247) = iprot.readListBegin() + for _i1251 in xrange(_size1247): + _elem1252 = iprot.readString() + self.success.append(_elem1252) iprot.readListEnd() else: iprot.skip(ftype) 
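The ThriftHiveMetastore.py hunks above and below do not change behaviour: they only renumber the temporaries (_etype*, _size*, _elem*, _key*, _val*, iter*) that the Thrift compiler emits for list/map/set (de)serialization, shifting the sequence by 35 because the new write-id structs added earlier in the generated module consume extra indices. As a minimal, illustrative sketch only (not part of the patch, with hypothetical function names), the generated readers and writers for a list<string> field follow this shape:

from thrift.Thrift import TType

def read_string_list(iprot):
    # readListBegin returns the declared element type and the element count;
    # the generated code binds these to numbered temporaries (_etypeNNN, _sizeNNN).
    values = []
    (_etype, _size) = iprot.readListBegin()
    for _i in xrange(_size):
        values.append(iprot.readString())
    iprot.readListEnd()
    return values

def write_string_list(oprot, values):
    # Mirror image of the reader: element-type tag, length, then one write per element.
    oprot.writeListBegin(TType.STRING, len(values))
    for _elem in values:
        oprot.writeString(_elem)
    oprot.writeListEnd()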
@@ -35167,8 +35369,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1218 in self.success: - oprot.writeString(iter1218) + for iter1253 in self.success: + oprot.writeString(iter1253) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36100,10 +36302,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1222, _size1219) = iprot.readListBegin() - for _i1223 in xrange(_size1219): - _elem1224 = iprot.readString() - self.success.append(_elem1224) + (_etype1257, _size1254) = iprot.readListBegin() + for _i1258 in xrange(_size1254): + _elem1259 = iprot.readString() + self.success.append(_elem1259) iprot.readListEnd() else: iprot.skip(ftype) @@ -36120,8 +36322,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1225 in self.success: - oprot.writeString(iter1225) + for iter1260 in self.success: + oprot.writeString(iter1260) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36648,10 +36850,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1229, _size1226) = iprot.readListBegin() - for _i1230 in xrange(_size1226): - _elem1231 = iprot.readString() - self.success.append(_elem1231) + (_etype1264, _size1261) = iprot.readListBegin() + for _i1265 in xrange(_size1261): + _elem1266 = iprot.readString() + self.success.append(_elem1266) iprot.readListEnd() else: iprot.skip(ftype) @@ -36668,8 +36870,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1232 in self.success: - oprot.writeString(iter1232) + for iter1267 in self.success: + oprot.writeString(iter1267) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36983,18 +37185,282 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class open_txns_result: +class open_txns_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = OpenTxnsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('open_txns_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def 
__hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class abort_txn_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AbortTxnRequest, AbortTxnRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AbortTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('abort_txn_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class abort_txn_result: + """ + Attributes: + - o1 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + ) + + def __init__(self, o1=None,): + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('abort_txn_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def 
validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class abort_txns_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AbortTxnsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('abort_txns_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class abort_txns_result: """ Attributes: - - success + - o1 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (OpenTxnsResponse, OpenTxnsResponse.thrift_spec), None, ), # 0 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 ) - def __init__(self, success=None,): - self.success = success + def __init__(self, o1=None,): + self.o1 = o1 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37005,10 +37471,10 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: + if fid == 1: if ftype == TType.STRUCT: - self.success = OpenTxnsResponse() - self.success.read(iprot) + self.o1 = NoSuchTxnException() + self.o1.read(iprot) else: iprot.skip(ftype) else: @@ -37020,10 +37486,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('open_txns_result') - if self.success is not None: - oprot.writeFieldBegin('success', 
TType.STRUCT, 0) - self.success.write(oprot) + oprot.writeStructBegin('abort_txns_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37034,7 +37500,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) return value def __repr__(self): @@ -37048,7 +37514,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_args: +class commit_txn_args: """ Attributes: - rqst @@ -37056,7 +37522,7 @@ class abort_txn_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnRequest, AbortTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -37073,7 +37539,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnRequest() + self.rqst = CommitTxnRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -37086,7 +37552,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_args') + oprot.writeStructBegin('commit_txn_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -37114,19 +37580,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txn_result: +class commit_txn_result: """ Attributes: - o1 + - o2 """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None,): + def __init__(self, o1=None, o2=None,): self.o1 = o1 + self.o2 = o2 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37143,6 +37612,12 @@ def read(self, iprot): self.o1.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37152,11 +37627,15 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txn_result') + oprot.writeStructBegin('commit_txn_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37167,6 +37646,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -37180,7 +37660,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txns_args: +class get_open_write_ids_args: """ Attributes: - rqst @@ -37188,7 +37668,7 
@@ class abort_txns_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (AbortTxnsRequest, AbortTxnsRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (GetOpenWriteIdsRequest, GetOpenWriteIdsRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -37205,7 +37685,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = AbortTxnsRequest() + self.rqst = GetOpenWriteIdsRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -37218,7 +37698,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_args') + oprot.writeStructBegin('get_open_write_ids_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -37246,15 +37726,174 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class abort_txns_result: +class get_open_write_ids_result: """ Attributes: + - success - o1 + - o2 """ thrift_spec = ( - None, # 0 + (0, TType.STRUCT, 'success', (GetOpenWriteIdsResponse, GetOpenWriteIdsResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetOpenWriteIdsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_open_write_ids_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_transactional_table_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AddTransactionalTableRequest, AddTransactionalTableRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AddTransactionalTableRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_transactional_table_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_transactional_table_result: + """ + Attributes: + - o1 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 ) def __init__(self, o1=None,): @@ -37271,7 +37910,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() + self.o1 = MetaException() self.o1.read(iprot) else: iprot.skip(ftype) @@ -37284,7 +37923,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('abort_txns_result') + oprot.writeStructBegin('add_transactional_table_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -37312,7 +37951,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_args: +class allocate_table_write_id_args: """ Attributes: - rqst @@ -37320,7 +37959,7 @@ class commit_txn_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (CommitTxnRequest, CommitTxnRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'rqst', (AllocateTableWriteIdRequest, AllocateTableWriteIdRequest.thrift_spec), None, ), # 1 ) def __init__(self, rqst=None,): @@ -37337,7 +37976,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = 
CommitTxnRequest() + self.rqst = AllocateTableWriteIdRequest() self.rqst.read(iprot) else: iprot.skip(ftype) @@ -37350,7 +37989,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_args') + oprot.writeStructBegin('allocate_table_write_id_args') if self.rqst is not None: oprot.writeFieldBegin('rqst', TType.STRUCT, 1) self.rqst.write(oprot) @@ -37378,22 +38017,27 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class commit_txn_result: +class allocate_table_write_id_result: """ Attributes: + - success - o1 - o2 + - o3 """ thrift_spec = ( - None, # 0 + (0, TType.STRUCT, 'success', (AllocateTableWriteIdResponse, AllocateTableWriteIdResponse.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, o1=None, o2=None,): + def __init__(self, success=None, o1=None, o2=None, o3=None,): + self.success = success self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -37404,7 +38048,13 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: + if ftype == TType.STRUCT: + self.success = AllocateTableWriteIdResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: if ftype == TType.STRUCT: self.o1 = NoSuchTxnException() self.o1.read(iprot) @@ -37416,6 +38066,12 @@ def read(self, iprot): self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -37425,7 +38081,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('commit_txn_result') + oprot.writeStructBegin('allocate_table_write_id_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -37434,6 +38094,10 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -37443,8 +38107,10 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -38910,20 +39576,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.db_names = [] - (_etype1236, _size1233) = iprot.readListBegin() - for 
_i1237 in xrange(_size1233): - _elem1238 = iprot.readString() - self.db_names.append(_elem1238) + (_etype1271, _size1268) = iprot.readListBegin() + for _i1272 in xrange(_size1268): + _elem1273 = iprot.readString() + self.db_names.append(_elem1273) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.table_names = [] - (_etype1242, _size1239) = iprot.readListBegin() - for _i1243 in xrange(_size1239): - _elem1244 = iprot.readString() - self.table_names.append(_elem1244) + (_etype1277, _size1274) = iprot.readListBegin() + for _i1278 in xrange(_size1274): + _elem1279 = iprot.readString() + self.table_names.append(_elem1279) iprot.readListEnd() else: iprot.skip(ftype) @@ -38946,15 +39612,15 @@ def write(self, oprot): if self.db_names is not None: oprot.writeFieldBegin('db_names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.db_names)) - for iter1245 in self.db_names: - oprot.writeString(iter1245) + for iter1280 in self.db_names: + oprot.writeString(iter1280) oprot.writeListEnd() oprot.writeFieldEnd() if self.table_names is not None: oprot.writeFieldBegin('table_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.table_names)) - for iter1246 in self.table_names: - oprot.writeString(iter1246) + for iter1281 in self.table_names: + oprot.writeString(iter1281) oprot.writeListEnd() oprot.writeFieldEnd() if self.txns_snapshot is not None: @@ -39011,11 +39677,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1250, _size1247) = iprot.readListBegin() - for _i1251 in xrange(_size1247): - _elem1252 = BasicTxnInfo() - _elem1252.read(iprot) - self.success.append(_elem1252) + (_etype1285, _size1282) = iprot.readListBegin() + for _i1286 in xrange(_size1282): + _elem1287 = BasicTxnInfo() + _elem1287.read(iprot) + self.success.append(_elem1287) iprot.readListEnd() else: iprot.skip(ftype) @@ -39032,8 +39698,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1253 in self.success: - iter1253.write(oprot) + for iter1288 in self.success: + iter1288.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 25e9a88..1a909b6 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9977,6 +9977,650 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class GetOpenWriteIdsRequest: + """ + Attributes: + - currentTxnId + - tableNames + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'currentTxnId', None, None, ), # 1 + (2, TType.LIST, 'tableNames', (TType.STRING,None), None, ), # 2 + ) + + def __init__(self, currentTxnId=None, tableNames=None,): + self.currentTxnId = currentTxnId + self.tableNames = tableNames + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.currentTxnId = 
iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.tableNames = [] + (_etype502, _size499) = iprot.readListBegin() + for _i503 in xrange(_size499): + _elem504 = iprot.readString() + self.tableNames.append(_elem504) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetOpenWriteIdsRequest') + if self.currentTxnId is not None: + oprot.writeFieldBegin('currentTxnId', TType.I64, 1) + oprot.writeI64(self.currentTxnId) + oprot.writeFieldEnd() + if self.tableNames is not None: + oprot.writeFieldBegin('tableNames', TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.tableNames)) + for iter505 in self.tableNames: + oprot.writeString(iter505) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.currentTxnId is None: + raise TProtocol.TProtocolException(message='Required field currentTxnId is unset!') + if self.tableNames is None: + raise TProtocol.TProtocolException(message='Required field tableNames is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.currentTxnId) + value = (value * 31) ^ hash(self.tableNames) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class OpenWriteIds: + """ + Attributes: + - tableName + - writeIdHighWaterMark + - openWriteIds + - minWriteId + - abortedBits + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'tableName', None, None, ), # 1 + (2, TType.I64, 'writeIdHighWaterMark', None, None, ), # 2 + (3, TType.LIST, 'openWriteIds', (TType.I64,None), None, ), # 3 + (4, TType.I64, 'minWriteId', None, None, ), # 4 + (5, TType.STRING, 'abortedBits', None, None, ), # 5 + ) + + def __init__(self, tableName=None, writeIdHighWaterMark=None, openWriteIds=None, minWriteId=None, abortedBits=None,): + self.tableName = tableName + self.writeIdHighWaterMark = writeIdHighWaterMark + self.openWriteIds = openWriteIds + self.minWriteId = minWriteId + self.abortedBits = abortedBits + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeIdHighWaterMark = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.openWriteIds = [] + (_etype509, _size506) = iprot.readListBegin() + for _i510 in xrange(_size506): + _elem511 = iprot.readI64() + self.openWriteIds.append(_elem511) + iprot.readListEnd() + else: + iprot.skip(ftype) 
+ elif fid == 4: + if ftype == TType.I64: + self.minWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.abortedBits = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('OpenWriteIds') + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 1) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + if self.writeIdHighWaterMark is not None: + oprot.writeFieldBegin('writeIdHighWaterMark', TType.I64, 2) + oprot.writeI64(self.writeIdHighWaterMark) + oprot.writeFieldEnd() + if self.openWriteIds is not None: + oprot.writeFieldBegin('openWriteIds', TType.LIST, 3) + oprot.writeListBegin(TType.I64, len(self.openWriteIds)) + for iter512 in self.openWriteIds: + oprot.writeI64(iter512) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.minWriteId is not None: + oprot.writeFieldBegin('minWriteId', TType.I64, 4) + oprot.writeI64(self.minWriteId) + oprot.writeFieldEnd() + if self.abortedBits is not None: + oprot.writeFieldBegin('abortedBits', TType.STRING, 5) + oprot.writeString(self.abortedBits) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + if self.writeIdHighWaterMark is None: + raise TProtocol.TProtocolException(message='Required field writeIdHighWaterMark is unset!') + if self.openWriteIds is None: + raise TProtocol.TProtocolException(message='Required field openWriteIds is unset!') + if self.abortedBits is None: + raise TProtocol.TProtocolException(message='Required field abortedBits is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.tableName) + value = (value * 31) ^ hash(self.writeIdHighWaterMark) + value = (value * 31) ^ hash(self.openWriteIds) + value = (value * 31) ^ hash(self.minWriteId) + value = (value * 31) ^ hash(self.abortedBits) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetOpenWriteIdsResponse: + """ + Attributes: + - openWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'openWriteIds', (TType.STRUCT,(OpenWriteIds, OpenWriteIds.thrift_spec)), None, ), # 1 + ) + + def __init__(self, openWriteIds=None,): + self.openWriteIds = openWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.openWriteIds = [] + (_etype516, _size513) = iprot.readListBegin() + for _i517 in xrange(_size513): + _elem518 = OpenWriteIds() + 
_elem518.read(iprot) + self.openWriteIds.append(_elem518) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetOpenWriteIdsResponse') + if self.openWriteIds is not None: + oprot.writeFieldBegin('openWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.openWriteIds)) + for iter519 in self.openWriteIds: + iter519.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.openWriteIds is None: + raise TProtocol.TProtocolException(message='Required field openWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.openWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AddTransactionalTableRequest: + """ + Attributes: + - dbName + - tableName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tableName', None, None, ), # 2 + ) + + def __init__(self, dbName=None, tableName=None,): + self.dbName = dbName + self.tableName = tableName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AddTransactionalTableRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 2) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in 
self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AllocateTableWriteIdRequest: + """ + Attributes: + - txnIds + - dbName + - tableName + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'txnIds', (TType.I64,None), None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'tableName', None, None, ), # 3 + ) + + def __init__(self, txnIds=None, dbName=None, tableName=None,): + self.txnIds = txnIds + self.dbName = dbName + self.tableName = tableName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txnIds = [] + (_etype523, _size520) = iprot.readListBegin() + for _i524 in xrange(_size520): + _elem525 = iprot.readI64() + self.txnIds.append(_elem525) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AllocateTableWriteIdRequest') + if self.txnIds is not None: + oprot.writeFieldBegin('txnIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.txnIds)) + for iter526 in self.txnIds: + oprot.writeI64(iter526) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 2) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin('tableName', TType.STRING, 3) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnIds is None: + raise TProtocol.TProtocolException(message='Required field txnIds is unset!') + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tableName is None: + raise TProtocol.TProtocolException(message='Required field tableName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.txnIds) + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tableName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TxnToWriteId: + """ + Attributes: + - txnId + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'txnId', None, None, ), 
# 1 + (2, TType.I64, 'writeId', None, None, ), # 2 + ) + + def __init__(self, txnId=None, writeId=None,): + self.txnId = txnId + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TxnToWriteId') + if self.txnId is not None: + oprot.writeFieldBegin('txnId', TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnId is None: + raise TProtocol.TProtocolException(message='Required field txnId is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.txnId) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AllocateTableWriteIdResponse: + """ + Attributes: + - txnToWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'txnToWriteIds', (TType.STRUCT,(TxnToWriteId, TxnToWriteId.thrift_spec)), None, ), # 1 + ) + + def __init__(self, txnToWriteIds=None,): + self.txnToWriteIds = txnToWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txnToWriteIds = [] + (_etype530, _size527) = iprot.readListBegin() + for _i531 in xrange(_size527): + _elem532 = TxnToWriteId() + _elem532.read(iprot) + self.txnToWriteIds.append(_elem532) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('AllocateTableWriteIdResponse') + if self.txnToWriteIds is not None: + oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) + for iter533 in self.txnToWriteIds: + iter533.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnToWriteIds is None: + raise TProtocol.TProtocolException(message='Required field txnToWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.txnToWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class LockComponent: """ Attributes: @@ -10177,11 +10821,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype502, _size499) = iprot.readListBegin() - for _i503 in xrange(_size499): - _elem504 = LockComponent() - _elem504.read(iprot) - self.component.append(_elem504) + (_etype537, _size534) = iprot.readListBegin() + for _i538 in xrange(_size534): + _elem539 = LockComponent() + _elem539.read(iprot) + self.component.append(_elem539) iprot.readListEnd() else: iprot.skip(ftype) @@ -10218,8 +10862,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter505 in self.component: - iter505.write(oprot) + for iter540 in self.component: + iter540.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -10917,11 +11561,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype509, _size506) = iprot.readListBegin() - for _i510 in xrange(_size506): - _elem511 = ShowLocksResponseElement() - _elem511.read(iprot) - self.locks.append(_elem511) + (_etype544, _size541) = iprot.readListBegin() + for _i545 in xrange(_size541): + _elem546 = ShowLocksResponseElement() + _elem546.read(iprot) + self.locks.append(_elem546) iprot.readListEnd() else: iprot.skip(ftype) @@ -10938,8 +11582,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter512 in self.locks: - iter512.write(oprot) + for iter547 in self.locks: + iter547.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11154,20 +11798,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype516, _size513) = iprot.readSetBegin() - for _i517 in xrange(_size513): - _elem518 = iprot.readI64() - self.aborted.add(_elem518) + (_etype551, _size548) = iprot.readSetBegin() + for _i552 in xrange(_size548): + _elem553 = iprot.readI64() + self.aborted.add(_elem553) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype522, _size519) = iprot.readSetBegin() - for _i523 in xrange(_size519): - _elem524 = iprot.readI64() - self.nosuch.add(_elem524) + (_etype557, _size554) = iprot.readSetBegin() + for _i558 in xrange(_size554): + _elem559 = iprot.readI64() + self.nosuch.add(_elem559) iprot.readSetEnd() else: iprot.skip(ftype) @@ -11184,15 +11828,15 @@ def write(self, oprot): if self.aborted is not 
None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter525 in self.aborted: - oprot.writeI64(iter525) + for iter560 in self.aborted: + oprot.writeI64(iter560) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter526 in self.nosuch: - oprot.writeI64(iter526) + for iter561 in self.nosuch: + oprot.writeI64(iter561) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11289,11 +11933,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype528, _vtype529, _size527 ) = iprot.readMapBegin() - for _i531 in xrange(_size527): - _key532 = iprot.readString() - _val533 = iprot.readString() - self.properties[_key532] = _val533 + (_ktype563, _vtype564, _size562 ) = iprot.readMapBegin() + for _i566 in xrange(_size562): + _key567 = iprot.readString() + _val568 = iprot.readString() + self.properties[_key567] = _val568 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11330,9 +11974,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter534,viter535 in self.properties.items(): - oprot.writeString(kiter534) - oprot.writeString(viter535) + for kiter569,viter570 in self.properties.items(): + oprot.writeString(kiter569) + oprot.writeString(viter570) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11767,11 +12411,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype539, _size536) = iprot.readListBegin() - for _i540 in xrange(_size536): - _elem541 = ShowCompactResponseElement() - _elem541.read(iprot) - self.compacts.append(_elem541) + (_etype574, _size571) = iprot.readListBegin() + for _i575 in xrange(_size571): + _elem576 = ShowCompactResponseElement() + _elem576.read(iprot) + self.compacts.append(_elem576) iprot.readListEnd() else: iprot.skip(ftype) @@ -11788,8 +12432,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter542 in self.compacts: - iter542.write(oprot) + for iter577 in self.compacts: + iter577.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11870,10 +12514,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitionnames = [] - (_etype546, _size543) = iprot.readListBegin() - for _i547 in xrange(_size543): - _elem548 = iprot.readString() - self.partitionnames.append(_elem548) + (_etype581, _size578) = iprot.readListBegin() + for _i582 in xrange(_size578): + _elem583 = iprot.readString() + self.partitionnames.append(_elem583) iprot.readListEnd() else: iprot.skip(ftype) @@ -11907,8 +12551,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter549 in self.partitionnames: - oprot.writeString(iter549) + for iter584 in self.partitionnames: + oprot.writeString(iter584) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -12129,10 +12773,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype553, _size550) = iprot.readListBegin() - for _i554 in xrange(_size550): - _elem555 = iprot.readI64() - 
self.open_txns.append(_elem555) + (_etype588, _size585) = iprot.readListBegin() + for _i589 in xrange(_size585): + _elem590 = iprot.readI64() + self.open_txns.append(_elem590) iprot.readListEnd() else: iprot.skip(ftype) @@ -12153,8 +12797,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.I64, len(self.open_txns)) - for iter556 in self.open_txns: - oprot.writeI64(iter556) + for iter591 in self.open_txns: + oprot.writeI64(iter591) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12442,11 +13086,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype560, _size557) = iprot.readListBegin() - for _i561 in xrange(_size557): - _elem562 = NotificationEvent() - _elem562.read(iprot) - self.events.append(_elem562) + (_etype595, _size592) = iprot.readListBegin() + for _i596 in xrange(_size592): + _elem597 = NotificationEvent() + _elem597.read(iprot) + self.events.append(_elem597) iprot.readListEnd() else: iprot.skip(ftype) @@ -12463,8 +13107,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter563 in self.events: - iter563.write(oprot) + for iter598 in self.events: + iter598.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12745,20 +13389,20 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype567, _size564) = iprot.readListBegin() - for _i568 in xrange(_size564): - _elem569 = iprot.readString() - self.filesAdded.append(_elem569) + (_etype602, _size599) = iprot.readListBegin() + for _i603 in xrange(_size599): + _elem604 = iprot.readString() + self.filesAdded.append(_elem604) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype573, _size570) = iprot.readListBegin() - for _i574 in xrange(_size570): - _elem575 = iprot.readString() - self.filesAddedChecksum.append(_elem575) + (_etype608, _size605) = iprot.readListBegin() + for _i609 in xrange(_size605): + _elem610 = iprot.readString() + self.filesAddedChecksum.append(_elem610) iprot.readListEnd() else: iprot.skip(ftype) @@ -12779,15 +13423,15 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter576 in self.filesAdded: - oprot.writeString(iter576) + for iter611 in self.filesAdded: + oprot.writeString(iter611) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter577 in self.filesAddedChecksum: - oprot.writeString(iter577) + for iter612 in self.filesAddedChecksum: + oprot.writeString(iter612) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12942,10 +13586,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype581, _size578) = iprot.readListBegin() - for _i582 in xrange(_size578): - _elem583 = iprot.readString() - self.partitionVals.append(_elem583) + (_etype616, _size613) = iprot.readListBegin() + for _i617 in xrange(_size613): + _elem618 = iprot.readString() + self.partitionVals.append(_elem618) iprot.readListEnd() else: iprot.skip(ftype) @@ -12978,8 +13622,8 @@ def write(self, oprot): if self.partitionVals is 
not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter584 in self.partitionVals: - oprot.writeString(iter584) + for iter619 in self.partitionVals: + oprot.writeString(iter619) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13166,12 +13810,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype586, _vtype587, _size585 ) = iprot.readMapBegin() - for _i589 in xrange(_size585): - _key590 = iprot.readI64() - _val591 = MetadataPpdResult() - _val591.read(iprot) - self.metadata[_key590] = _val591 + (_ktype621, _vtype622, _size620 ) = iprot.readMapBegin() + for _i624 in xrange(_size620): + _key625 = iprot.readI64() + _val626 = MetadataPpdResult() + _val626.read(iprot) + self.metadata[_key625] = _val626 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13193,9 +13837,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter592,viter593 in self.metadata.items(): - oprot.writeI64(kiter592) - viter593.write(oprot) + for kiter627,viter628 in self.metadata.items(): + oprot.writeI64(kiter627) + viter628.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -13265,10 +13909,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype597, _size594) = iprot.readListBegin() - for _i598 in xrange(_size594): - _elem599 = iprot.readI64() - self.fileIds.append(_elem599) + (_etype632, _size629) = iprot.readListBegin() + for _i633 in xrange(_size629): + _elem634 = iprot.readI64() + self.fileIds.append(_elem634) iprot.readListEnd() else: iprot.skip(ftype) @@ -13300,8 +13944,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter600 in self.fileIds: - oprot.writeI64(iter600) + for iter635 in self.fileIds: + oprot.writeI64(iter635) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -13375,11 +14019,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype602, _vtype603, _size601 ) = iprot.readMapBegin() - for _i605 in xrange(_size601): - _key606 = iprot.readI64() - _val607 = iprot.readString() - self.metadata[_key606] = _val607 + (_ktype637, _vtype638, _size636 ) = iprot.readMapBegin() + for _i640 in xrange(_size636): + _key641 = iprot.readI64() + _val642 = iprot.readString() + self.metadata[_key641] = _val642 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13401,9 +14045,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter608,viter609 in self.metadata.items(): - oprot.writeI64(kiter608) - oprot.writeString(viter609) + for kiter643,viter644 in self.metadata.items(): + oprot.writeI64(kiter643) + oprot.writeString(viter644) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -13464,10 +14108,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype613, _size610) = iprot.readListBegin() - for _i614 in xrange(_size610): - _elem615 = iprot.readI64() - self.fileIds.append(_elem615) + (_etype648, _size645) = iprot.readListBegin() + for _i649 in xrange(_size645): + _elem650 = iprot.readI64() + self.fileIds.append(_elem650) iprot.readListEnd() 
else: iprot.skip(ftype) @@ -13484,8 +14128,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter616 in self.fileIds: - oprot.writeI64(iter616) + for iter651 in self.fileIds: + oprot.writeI64(iter651) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13591,20 +14235,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype620, _size617) = iprot.readListBegin() - for _i621 in xrange(_size617): - _elem622 = iprot.readI64() - self.fileIds.append(_elem622) + (_etype655, _size652) = iprot.readListBegin() + for _i656 in xrange(_size652): + _elem657 = iprot.readI64() + self.fileIds.append(_elem657) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = iprot.readString() - self.metadata.append(_elem628) + (_etype661, _size658) = iprot.readListBegin() + for _i662 in xrange(_size658): + _elem663 = iprot.readString() + self.metadata.append(_elem663) iprot.readListEnd() else: iprot.skip(ftype) @@ -13626,15 +14270,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter629 in self.fileIds: - oprot.writeI64(iter629) + for iter664 in self.fileIds: + oprot.writeI64(iter664) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter630 in self.metadata: - oprot.writeString(iter630) + for iter665 in self.metadata: + oprot.writeString(iter665) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -13742,10 +14386,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype634, _size631) = iprot.readListBegin() - for _i635 in xrange(_size631): - _elem636 = iprot.readI64() - self.fileIds.append(_elem636) + (_etype669, _size666) = iprot.readListBegin() + for _i670 in xrange(_size666): + _elem671 = iprot.readI64() + self.fileIds.append(_elem671) iprot.readListEnd() else: iprot.skip(ftype) @@ -13762,8 +14406,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter637 in self.fileIds: - oprot.writeI64(iter637) + for iter672 in self.fileIds: + oprot.writeI64(iter672) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13992,11 +14636,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype641, _size638) = iprot.readListBegin() - for _i642 in xrange(_size638): - _elem643 = Function() - _elem643.read(iprot) - self.functions.append(_elem643) + (_etype676, _size673) = iprot.readListBegin() + for _i677 in xrange(_size673): + _elem678 = Function() + _elem678.read(iprot) + self.functions.append(_elem678) iprot.readListEnd() else: iprot.skip(ftype) @@ -14013,8 +14657,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter644 in self.functions: - iter644.write(oprot) + for iter679 in self.functions: + iter679.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14066,10 +14710,10 @@ def read(self, iprot): if fid == 1: 
if ftype == TType.LIST: self.values = [] - (_etype648, _size645) = iprot.readListBegin() - for _i649 in xrange(_size645): - _elem650 = iprot.readI32() - self.values.append(_elem650) + (_etype683, _size680) = iprot.readListBegin() + for _i684 in xrange(_size680): + _elem685 = iprot.readI32() + self.values.append(_elem685) iprot.readListEnd() else: iprot.skip(ftype) @@ -14086,8 +14730,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter651 in self.values: - oprot.writeI32(iter651) + for iter686 in self.values: + oprot.writeI32(iter686) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14316,10 +14960,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype655, _size652) = iprot.readListBegin() - for _i656 in xrange(_size652): - _elem657 = iprot.readString() - self.tblNames.append(_elem657) + (_etype690, _size687) = iprot.readListBegin() + for _i691 in xrange(_size687): + _elem692 = iprot.readString() + self.tblNames.append(_elem692) iprot.readListEnd() else: iprot.skip(ftype) @@ -14346,8 +14990,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter658 in self.tblNames: - oprot.writeString(iter658) + for iter693 in self.tblNames: + oprot.writeString(iter693) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -14407,11 +15051,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype662, _size659) = iprot.readListBegin() - for _i663 in xrange(_size659): - _elem664 = Table() - _elem664.read(iprot) - self.tables.append(_elem664) + (_etype697, _size694) = iprot.readListBegin() + for _i698 in xrange(_size694): + _elem699 = Table() + _elem699.read(iprot) + self.tables.append(_elem699) iprot.readListEnd() else: iprot.skip(ftype) @@ -14428,8 +15072,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter665 in self.tables: - iter665.write(oprot) + for iter700 in self.tables: + iter700.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14733,10 +15377,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.SET: self.tablesUsed = set() - (_etype669, _size666) = iprot.readSetBegin() - for _i670 in xrange(_size666): - _elem671 = iprot.readString() - self.tablesUsed.add(_elem671) + (_etype704, _size701) = iprot.readSetBegin() + for _i705 in xrange(_size701): + _elem706 = iprot.readString() + self.tablesUsed.add(_elem706) iprot.readSetEnd() else: iprot.skip(ftype) @@ -14762,8 +15406,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 2) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter672 in self.tablesUsed: - oprot.writeString(iter672) + for iter707 in self.tablesUsed: + oprot.writeString(iter707) oprot.writeSetEnd() oprot.writeFieldEnd() if self.invalidationTime is not None: @@ -15665,44 +16309,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype676, _size673) = iprot.readListBegin() - for _i677 in xrange(_size673): - _elem678 = WMPool() - _elem678.read(iprot) - self.pools.append(_elem678) + (_etype711, _size708) = iprot.readListBegin() + for _i712 in xrange(_size708): + _elem713 = WMPool() + 
_elem713.read(iprot) + self.pools.append(_elem713) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype682, _size679) = iprot.readListBegin() - for _i683 in xrange(_size679): - _elem684 = WMMapping() - _elem684.read(iprot) - self.mappings.append(_elem684) + (_etype717, _size714) = iprot.readListBegin() + for _i718 in xrange(_size714): + _elem719 = WMMapping() + _elem719.read(iprot) + self.mappings.append(_elem719) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype688, _size685) = iprot.readListBegin() - for _i689 in xrange(_size685): - _elem690 = WMTrigger() - _elem690.read(iprot) - self.triggers.append(_elem690) + (_etype723, _size720) = iprot.readListBegin() + for _i724 in xrange(_size720): + _elem725 = WMTrigger() + _elem725.read(iprot) + self.triggers.append(_elem725) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype694, _size691) = iprot.readListBegin() - for _i695 in xrange(_size691): - _elem696 = WMPoolTrigger() - _elem696.read(iprot) - self.poolTriggers.append(_elem696) + (_etype729, _size726) = iprot.readListBegin() + for _i730 in xrange(_size726): + _elem731 = WMPoolTrigger() + _elem731.read(iprot) + self.poolTriggers.append(_elem731) iprot.readListEnd() else: iprot.skip(ftype) @@ -15723,29 +16367,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter697 in self.pools: - iter697.write(oprot) + for iter732 in self.pools: + iter732.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter698 in self.mappings: - iter698.write(oprot) + for iter733 in self.mappings: + iter733.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter699 in self.triggers: - iter699.write(oprot) + for iter734 in self.triggers: + iter734.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter700 in self.poolTriggers: - iter700.write(oprot) + for iter735 in self.poolTriggers: + iter735.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16219,11 +16863,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype704, _size701) = iprot.readListBegin() - for _i705 in xrange(_size701): - _elem706 = WMResourcePlan() - _elem706.read(iprot) - self.resourcePlans.append(_elem706) + (_etype739, _size736) = iprot.readListBegin() + for _i740 in xrange(_size736): + _elem741 = WMResourcePlan() + _elem741.read(iprot) + self.resourcePlans.append(_elem741) iprot.readListEnd() else: iprot.skip(ftype) @@ -16240,8 +16884,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter707 in self.resourcePlans: - iter707.write(oprot) + for iter742 in self.resourcePlans: + iter742.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16545,20 +17189,20 @@ def read(self, iprot): if 
fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype711, _size708) = iprot.readListBegin() - for _i712 in xrange(_size708): - _elem713 = iprot.readString() - self.errors.append(_elem713) + (_etype746, _size743) = iprot.readListBegin() + for _i747 in xrange(_size743): + _elem748 = iprot.readString() + self.errors.append(_elem748) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype717, _size714) = iprot.readListBegin() - for _i718 in xrange(_size714): - _elem719 = iprot.readString() - self.warnings.append(_elem719) + (_etype752, _size749) = iprot.readListBegin() + for _i753 in xrange(_size749): + _elem754 = iprot.readString() + self.warnings.append(_elem754) iprot.readListEnd() else: iprot.skip(ftype) @@ -16575,15 +17219,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter720 in self.errors: - oprot.writeString(iter720) + for iter755 in self.errors: + oprot.writeString(iter755) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter721 in self.warnings: - oprot.writeString(iter721) + for iter756 in self.warnings: + oprot.writeString(iter756) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17160,11 +17804,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype725, _size722) = iprot.readListBegin() - for _i726 in xrange(_size722): - _elem727 = WMTrigger() - _elem727.read(iprot) - self.triggers.append(_elem727) + (_etype760, _size757) = iprot.readListBegin() + for _i761 in xrange(_size757): + _elem762 = WMTrigger() + _elem762.read(iprot) + self.triggers.append(_elem762) iprot.readListEnd() else: iprot.skip(ftype) @@ -17181,8 +17825,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter728 in self.triggers: - iter728.write(oprot) + for iter763 in self.triggers: + iter763.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 3a11a05..2957cc0 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2220,6 +2220,151 @@ class CommitTxnRequest ::Thrift::Struct.generate_accessors self end +class GetOpenWriteIdsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + CURRENTTXNID = 1 + TABLENAMES = 2 + + FIELDS = { + CURRENTTXNID => {:type => ::Thrift::Types::I64, :name => 'currentTxnId'}, + TABLENAMES => {:type => ::Thrift::Types::LIST, :name => 'tableNames', :element => {:type => ::Thrift::Types::STRING}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field currentTxnId is unset!') unless @currentTxnId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableNames is unset!') unless @tableNames + end + + ::Thrift::Struct.generate_accessors self +end + +class OpenWriteIds + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLENAME = 1 + WRITEIDHIGHWATERMARK = 2 + OPENWRITEIDS = 3 + MINWRITEID = 4 
+ ABORTEDBITS = 5 + + FIELDS = { + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, + WRITEIDHIGHWATERMARK => {:type => ::Thrift::Types::I64, :name => 'writeIdHighWaterMark'}, + OPENWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'openWriteIds', :element => {:type => ::Thrift::Types::I64}}, + MINWRITEID => {:type => ::Thrift::Types::I64, :name => 'minWriteId', :optional => true}, + ABORTEDBITS => {:type => ::Thrift::Types::STRING, :name => 'abortedBits', :binary => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeIdHighWaterMark is unset!') unless @writeIdHighWaterMark + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field openWriteIds is unset!') unless @openWriteIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field abortedBits is unset!') unless @abortedBits + end + + ::Thrift::Struct.generate_accessors self +end + +class GetOpenWriteIdsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + OPENWRITEIDS = 1 + + FIELDS = { + OPENWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'openWriteIds', :element => {:type => ::Thrift::Types::STRUCT, :class => ::OpenWriteIds}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field openWriteIds is unset!') unless @openWriteIds + end + + ::Thrift::Struct.generate_accessors self +end + +class AddTransactionalTableRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TABLENAME = 2 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + end + + ::Thrift::Struct.generate_accessors self +end + +class AllocateTableWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNIDS = 1 + DBNAME = 2 + TABLENAME = 3 + + FIELDS = { + TXNIDS => {:type => ::Thrift::Types::LIST, :name => 'txnIds', :element => {:type => ::Thrift::Types::I64}}, + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnIds is unset!') unless @txnIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName + end + + ::Thrift::Struct.generate_accessors self +end + +class TxnToWriteId + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNID = 1 + WRITEID = 2 + + FIELDS = { + TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnId is unset!') unless @txnId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class AllocateTableWriteIdResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + TXNTOWRITEIDS = 1 + + FIELDS = { + TXNTOWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'txnToWriteIds', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnToWriteId}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field txnToWriteIds is unset!') unless @txnToWriteIds + end + + ::Thrift::Struct.generate_accessors self +end + class LockComponent include ::Thrift::Struct, ::Thrift::Struct_Union TYPE = 1 diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index a788c08..79ba29d 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2388,6 +2388,56 @@ module ThriftHiveMetastore return end + def get_open_write_ids(rqst) + send_get_open_write_ids(rqst) + return recv_get_open_write_ids() + end + + def send_get_open_write_ids(rqst) + send_message('get_open_write_ids', Get_open_write_ids_args, :rqst => rqst) + end + + def recv_get_open_write_ids() + result = receive_message(Get_open_write_ids_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_open_write_ids failed: unknown result') + end + + def add_transactional_table(rqst) + send_add_transactional_table(rqst) + recv_add_transactional_table() + end + + def send_add_transactional_table(rqst) + send_message('add_transactional_table', Add_transactional_table_args, :rqst => rqst) + end + + def recv_add_transactional_table() + result = receive_message(Add_transactional_table_result) + raise result.o1 unless result.o1.nil? + return + end + + def allocate_table_write_id(rqst) + send_allocate_table_write_id(rqst) + return recv_allocate_table_write_id() + end + + def send_allocate_table_write_id(rqst) + send_message('allocate_table_write_id', Allocate_table_write_id_args, :rqst => rqst) + end + + def recv_allocate_table_write_id() + result = receive_message(Allocate_table_write_id_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'allocate_table_write_id failed: unknown result') + end + def lock(rqst) send_lock(rqst) return recv_lock() @@ -4882,6 +4932,45 @@ module ThriftHiveMetastore write_result(result, oprot, 'commit_txn', seqid) end + def process_get_open_write_ids(seqid, iprot, oprot) + args = read_args(iprot, Get_open_write_ids_args) + result = Get_open_write_ids_result.new() + begin + result.success = @handler.get_open_write_ids(args.rqst) + rescue ::NoSuchTxnException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_open_write_ids', seqid) + end + + def process_add_transactional_table(seqid, iprot, oprot) + args = read_args(iprot, Add_transactional_table_args) + result = Add_transactional_table_result.new() + begin + @handler.add_transactional_table(args.rqst) + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'add_transactional_table', seqid) + end + + def process_allocate_table_write_id(seqid, iprot, oprot) + args = read_args(iprot, Allocate_table_write_id_args) + result = Allocate_table_write_id_result.new() + begin + result.success = @handler.allocate_table_write_id(args.rqst) + rescue ::NoSuchTxnException => o1 + result.o1 = o1 + rescue ::TxnAbortedException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'allocate_table_write_id', seqid) + end + def process_lock(seqid, iprot, oprot) args = read_args(iprot, Lock_args) result = Lock_result.new() @@ -10751,6 +10840,112 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_open_write_ids_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::GetOpenWriteIdsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_open_write_ids_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetOpenWriteIdsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Add_transactional_table_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::AddTransactionalTableRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Add_transactional_table_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Allocate_table_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + RQST = 1 + + FIELDS = { + RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::AllocateTableWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class 
Allocate_table_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AllocateTableWriteIdResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchTxnException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::TxnAbortedException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Lock_args include ::Thrift::Struct, ::Thrift::Struct_Union RQST = 1 diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index ecc4644..61e10dc 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1544,6 +1544,11 @@ private void create_table_core(final RawStore ms, final Table tbl, } } + if (TxnUtils.isTransactionalTable(tbl)) { + // Need to update the table write ID sequence generation table + getTxnHandler().addTransactionalTable(new AddTransactionalTableRequest(tbl.getDbName(), tbl.getTableName())); + } + if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext); @@ -6681,6 +6686,22 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { } @Override + public GetOpenWriteIdsResponse get_open_write_ids(GetOpenWriteIdsRequest rqst) throws TException { + return getTxnHandler().getOpenWriteIds(rqst); + } + + @Override + public void add_transactional_table(AddTransactionalTableRequest rqst) throws TException { + getTxnHandler().addTransactionalTable(rqst); + } + + @Override + public AllocateTableWriteIdResponse allocate_table_write_id( + AllocateTableWriteIdRequest rqst) throws TException { + return getTxnHandler().allocateTableWriteId(rqst); + } + + @Override public LockResponse lock(LockRequest rqst) throws TException { return getTxnHandler().lock(rqst); } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index a3cb17b..54dca43 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -54,6 +54,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -2199,6 +2201,20 @@ public ValidTxnList getValidTxns(long currentTxn) throws TException { } @Override + public ValidWriteIdList getValidWriteIds(String tableName) throws TException { + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(0, Collections.singletonList(tableName)); + 
GetOpenWriteIdsResponse openWriteIds = client.get_open_write_ids(rqst); + return TxnUtils.createValidReaderWriteIdList(openWriteIds.getOpenWriteIds().get(0)); + } + + // TODO (Sankar): Need to modify the API definition to take ValidTxnList as input. + @Override + public ValidTxnWriteIdList getValidWriteIds(long currentTxn, List tablesList) throws TException { + GetOpenWriteIdsRequest rqst = new GetOpenWriteIdsRequest(currentTxn, tablesList); + return TxnUtils.createValidTxnWriteIdList(client.get_open_write_ids(rqst)); + } + + @Override public long openTxn(String user) throws TException { OpenTxnsResponse txns = openTxns(user, 1); return txns.getTxn_ids().get(0); @@ -2238,6 +2254,20 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { } @Override + public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { + return allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName).get(0).getWriteId(); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) + throws TException { + AllocateTableWriteIdRequest rqst + = new AllocateTableWriteIdRequest(txnIds, dbName, tableName); + AllocateTableWriteIdResponse writeId = client.allocate_table_write_id(rqst); + return writeId.getTxnToWriteIds(); + } + + @Override public LockResponse lock(LockRequest request) throws NoSuchTxnException, TxnAbortedException, TException { return client.lock(request); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 8ec8b3b..b5596b8 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -99,6 +101,7 @@ import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; @@ -1364,14 +1367,33 @@ BasicTxnInfo getLastCompletedTransactionForTable(String dbName, String tableName /** * Get a structure that details valid transactions. - * @param currentTxn The current transaction of the caller. This will be removed from the + * @param currentTxn The current transaction of the caller. This will be removed from the * exceptions list so that the caller sees records from his own transaction. - * @return list of valid transactions + * @return list of valid transactions and also valid write IDs for each input table. * @throws TException */ ValidTxnList getValidTxns(long currentTxn) throws TException; /** + * Get a structure that details valid transactions. 
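For orientation, a minimal hypothetical usage sketch of the read-side client API introduced in this hunk (not part of the patch itself): the client instance, table names, and user name are placeholders, while the two getValidWriteIds overloads, openTxn and commitTxn are the methods declared here.

import java.util.Arrays;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.thrift.TException;

public class WriteIdReadSketch {
  // Fetch the per-table write-ID snapshot, first for one table, then for several
  // tables under the caller's own open transaction.
  static void readSnapshots(IMetaStoreClient msc) throws TException {
    // Single-table form; the name follows the <db>.<table> convention used by this patch.
    ValidWriteIdList single = msc.getValidWriteIds("db1.tbl1");

    // Multi-table form, keyed by the caller's transaction so its own writes stay visible.
    long txnId = msc.openTxn("hive");
    ValidTxnWriteIdList perTable =
        msc.getValidWriteIds(txnId, Arrays.asList("db1.tbl1", "db1.tbl2"));
    msc.commitTxn(txnId);
  }
}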
+ * @param tableName full table name of format . + * @return list of valid write ids for the given table + * @throws TException + */ + ValidWriteIdList getValidWriteIds(String tableName) throws TException; + + /** + * Get a structure that details valid transactions. + * @param currentTxn The current transaction of the caller. Corresponding write id will be removed + * from the exceptions list so that the caller sees records from his own transaction. + * @param tablesList list of tables read from the current transaction for which needs to populate + * the valid write ids + * @return list of valid write ids for the given list of tables. + * @throws TException + */ + ValidTxnWriteIdList getValidWriteIds(long currentTxn, List tablesList) throws TException; + + /** * Initiate a transaction. * @param user User who is opening this transaction. This is the Hive user, * not necessarily the OS user. It is assumed that this user has already been @@ -1440,6 +1462,24 @@ void commitTxn(long txnid) void abortTxns(List txnids) throws TException; /** + * Allocate a per table write ID and associate it with the given transaction + * @param txnId id of transaction to which the allocated write ID to be associated. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @throws TException + */ + long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException; + + /** + * Allocate a per table write ID and associate it with the given transaction + * @param txnIds ids of transaction batchto which the allocated write ID to be associated. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @throws TException + */ + List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException; + + /** * Show the list of currently open transactions. This is for use by "show transactions" in the * grammar, not for applications that want to find a list of current transactions to work with. * Those wishing the latter should call {@link #getValidTxns()}. diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index 41e428b..1834e20 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hive.metastore.txn; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.OpenWriteIds; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -42,11 +42,11 @@ public boolean tooManyAborts = false; /** * {@code 0} means it wasn't set (e.g. in case of upgrades, since ResultSet.getLong() will return 0 if field is NULL) - * See {@link TxnStore#setCompactionHighestTxnId(CompactionInfo, long)} for precise definition. - * See also {@link TxnUtils#createValidCompactTxnList(GetOpenTxnsInfoResponse)} and - * {@link ValidCompactorTxnList#highWatermark} + * See {@link TxnStore#setCompactionHighestWriteId(CompactionInfo, long)} for precise definition. 
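The allocation side of the same client API, declared just above, can be exercised as in the sketch below. This is illustrative only and assumes the usual Thrift-generated accessors on TxnToWriteId.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
import org.apache.thrift.TException;

public class WriteIdAllocSketch {
  // Allocate one write ID per open transaction for a single table and print the mapping.
  static void allocateForTable(IMetaStoreClient msc, List<Long> openTxnIds) throws TException {
    List<TxnToWriteId> mapping = msc.allocateTableWriteIdsBatch(openTxnIds, "db1", "tbl1");
    for (TxnToWriteId t2w : mapping) {
      // Each transaction receives a write ID drawn from the table's own sequence.
      System.out.println("txn " + t2w.getTxnId() + " -> write id " + t2w.getWriteId());
    }
    // Single-transaction convenience form (assumes the list is non-empty):
    long writeId = msc.allocateTableWriteId(openTxnIds.get(0), "db1", "tbl1");
  }
}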
+ * See also {@link TxnUtils#createValidCompactWriteIdList(OpenWriteIds)} and + * {@link ValidCompactorWriteIdList#highWatermark} */ - public long highestTxnId; + public long highestWriteId; byte[] metaInfo; String hadoopJobId; @@ -107,7 +107,7 @@ public String toString() { "properties:" + properties + "," + "runAs:" + runAs + "," + "tooManyAborts:" + tooManyAborts + "," + - "highestTxnId:" + highestTxnId; + "highestWriteId:" + highestWriteId; } /** @@ -127,7 +127,7 @@ static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLExcept fullCi.workerId = rs.getString(8); fullCi.start = rs.getLong(9); fullCi.runAs = rs.getString(10); - fullCi.highestTxnId = rs.getLong(11); + fullCi.highestWriteId = rs.getLong(11); fullCi.metaInfo = rs.getBytes(12); fullCi.hadoopJobId = rs.getString(13); return fullCi; @@ -144,7 +144,7 @@ static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionIn pStmt.setLong(9, ci.start); pStmt.setLong(10, endTime); pStmt.setString(11, ci.runAs); - pStmt.setLong(12, ci.highestTxnId); + pStmt.setLong(12, ci.highestWriteId); pStmt.setBytes(13, ci.metaInfo); pStmt.setString(14, ci.hadoopJobId); } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index a90b7d4..ab4f628 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -287,7 +287,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); String s = "select cq_id, cq_database, cq_table, cq_partition, " + - "cq_type, cq_run_as, cq_highest_txn_id from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'"; + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '" + READY_FOR_CLEANING + "'"; LOG.debug("Going to execute query <" + s + ">"); rs = stmt.executeQuery(s); while (rs.next()) { @@ -302,7 +302,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { default: throw new MetaException("Unexpected compaction type " + rs.getString(5)); } info.runAs = rs.getString(6); - info.highestTxnId = rs.getLong(7); + info.highestWriteId = rs.getLong(7); rc.add(info); } LOG.debug("Going to rollback"); @@ -338,7 +338,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { ResultSet rs = null; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, info.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -358,20 +358,20 @@ public void markCleaned(CompactionInfo info) throws MetaException { LOG.debug("Going to rollback"); dbConn.rollback(); } - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, 
CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); info.state = SUCCEEDED_STATE; CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn)); updCount = pStmt.executeUpdate(); // Remove entries from completed_txn_components as well, so we don't start looking there - // again but only up to the highest txn ID include in this compaction job. - //highestTxnId will be NULL in upgrade scenarios + // again but only up to the highest write ID include in this compaction job. + //highestWriteId will be NULL in upgrade scenarios s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " + "ctc_table = ?"; if (info.partName != null) { s += " and ctc_partition = ?"; } - if(info.highestTxnId != 0) { + if(info.highestWriteId != 0) { s += " and ctc_txnid <= ?"; } pStmt = dbConn.prepareStatement(s); @@ -381,8 +381,8 @@ public void markCleaned(CompactionInfo info) throws MetaException { if (info.partName != null) { pStmt.setString(paramCount++, info.partName); } - if(info.highestTxnId != 0) { - pStmt.setLong(paramCount++, info.highestTxnId); + if(info.highestWriteId != 0) { + pStmt.setLong(paramCount++, info.highestWriteId); } LOG.debug("Going to execute update <" + s + ">"); if (pStmt.executeUpdate() < 1) { @@ -392,15 +392,15 @@ public void markCleaned(CompactionInfo info) throws MetaException { s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' and tc_database = ? 
and tc_table = ?"; - if (info.highestTxnId != 0) s += " and txn_id <= ?"; + if (info.highestWriteId != 0) s += " and txn_id <= ?"; if (info.partName != null) s += " and tc_partition = ?"; pStmt = dbConn.prepareStatement(s); paramCount = 1; pStmt.setString(paramCount++, info.dbname); pStmt.setString(paramCount++, info.tableName); - if(info.highestTxnId != 0) { - pStmt.setLong(paramCount++, info.highestTxnId); + if(info.highestWriteId != 0) { + pStmt.setLong(paramCount++, info.highestWriteId); } if (info.partName != null) { pStmt.setString(paramCount++, info.partName); @@ -700,14 +700,14 @@ public void revokeTimedoutWorkers(long timeout) throws MetaException { */ @Override @RetrySemantics.Idempotent - public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException { + public void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException { Connection dbConn = null; Statement stmt = null; try { try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_TXN_ID = " + highestTxnId + + int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_WRITE_ID = " + highestWriteId + " WHERE CQ_ID = " + ci.id); if(updCount != 1) { throw new IllegalStateException("Could not find record in COMPACTION_QUEUE for " + ci); @@ -715,14 +715,14 @@ public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) thro dbConn.commit(); } catch (SQLException e) { rollbackDBConn(dbConn); - checkRetryable(dbConn, e, "setCompactionHighestTxnId(" + ci + "," + highestTxnId + ")"); + checkRetryable(dbConn, e, "setCompactionHighestWriteId(" + ci + "," + highestWriteId + ")"); throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e)); } finally { close(null, stmt, dbConn); } } catch (RetryException ex) { - setCompactionHighestTxnId(ci, highestTxnId); + setCompactionHighestWriteId(ci, highestWriteId); } } private static class RetentionCounters { @@ -932,7 +932,7 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); + pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?"); pStmt.setLong(1, ci.id); rs = pStmt.executeQuery(); if(rs.next()) { @@ -966,7 +966,7 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho close(rs, stmt, null); closeStmt(pStmt); - pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)"); + pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, 
?,?,?,?,?, ?,?,?,?)"); CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn)); int updCount = pStmt.executeUpdate(); LOG.debug("Going to commit"); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index e724723..3ab186f 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -96,6 +96,15 @@ public static void prepDb(Configuration conf) throws Exception { " CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL)"); stmt.execute("CREATE TABLE NEXT_TXN_ID (" + " NTXN_NEXT bigint NOT NULL)"); stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)"); + + stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" + + " T2W_TXNID bigint," + + " T2W_TABLE varchar(256) NOT NULL," + + " T2W_WRITEID bigint NOT NULL)"); + stmt.execute("CREATE TABLE NEXT_WRITE_ID (" + + " NWI_TABLE varchar(256) NOT NULL," + + " NWI_NEXT bigint NOT NULL)"); + stmt.execute("CREATE TABLE HIVE_LOCKS (" + " HL_LOCK_EXT_ID bigint NOT NULL," + " HL_LOCK_INT_ID bigint NOT NULL," + @@ -130,7 +139,7 @@ public static void prepDb(Configuration conf) throws Exception { " CQ_WORKER_ID varchar(128)," + " CQ_START bigint," + " CQ_RUN_AS varchar(128)," + - " CQ_HIGHEST_TXN_ID bigint," + + " CQ_HIGHEST_WRITE_ID bigint," + " CQ_META_INFO varchar(2048) for bit data," + " CQ_HADOOP_JOB_ID varchar(32))"); @@ -149,7 +158,7 @@ public static void prepDb(Configuration conf) throws Exception { " CC_START bigint," + " CC_END bigint," + " CC_RUN_AS varchar(128)," + - " CC_HIGHEST_TXN_ID bigint," + + " CC_HIGHEST_WRITE_ID bigint," + " CC_META_INFO varchar(2048) for bit data," + " CC_HADOOP_JOB_ID varchar(32))"); @@ -219,6 +228,8 @@ public static void cleanDb(Configuration conf) throws Exception { success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount); success &= dropTable(stmt, "TXNS", retryCount); success &= dropTable(stmt, "NEXT_TXN_ID", retryCount); + success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount); + success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount); success &= dropTable(stmt, "HIVE_LOCKS", retryCount); success &= dropTable(stmt, "NEXT_LOCK_ID", retryCount); success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount); diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 3a558b4..24989b0 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -66,6 +66,9 @@ import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AddTransactionalTableRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdRequest; +import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; @@ -77,6 +80,8 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import 
org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsRequest; +import org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; @@ -91,6 +96,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.OpenWriteIds; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; @@ -103,6 +109,7 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnOpenException; import org.apache.hadoop.hive.metastore.api.TxnState; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.api.TxnsSnapshot; import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -867,6 +874,267 @@ public void commitTxn(CommitTxnRequest rqst) } @Override + @RetrySemantics.ReadOnly + public GetOpenWriteIdsResponse getOpenWriteIds(GetOpenWriteIdsRequest rqst) + throws NoSuchTxnException, MetaException { + try { + // We need to figure out the current transaction number and the list of + // open transactions. To avoid needing a transaction on the underlying + // database we'll look at the current transaction number first. If it + // subsequently shows up in the open list that's ok. 
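As an aside, the OpenWriteIds structure assembled by this method can be unpacked on the consumer side roughly as follows. This is only a sketch of the wire format (high water mark plus the parallel openWriteIds list and abortedBits bit set), not the actual TxnUtils.createValidReaderWriteIdList implementation, and it assumes the standard Thrift-generated accessors.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.OpenWriteIds;

public class OpenWriteIdsDecodeSketch {
  // Split the returned write IDs into still-open vs. aborted using the parallel bit set.
  static void decode(OpenWriteIds owi) {
    List<Long> openIds = new ArrayList<>();
    List<Long> abortedIds = new ArrayList<>();
    BitSet abortedBits = BitSet.valueOf(owi.getAbortedBits()); // byte[] form of the bit set
    List<Long> ids = owi.getOpenWriteIds();
    for (int i = 0; i < ids.size(); i++) {
      if (abortedBits.get(i)) {
        abortedIds.add(ids.get(i)); // bit set => the owning txn was aborted
      } else {
        openIds.add(ids.get(i));    // otherwise the owning txn is still open
      }
    }
    // Anything above owi.getWriteIdHighWaterMark() is invisible to this reader.
  }
}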
+ Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + try { + /** + * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} + */ + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + String s = "select ntxn_next - 1 from NEXT_TXN_ID"; + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (!rs.next()) { + throw new MetaException("Transaction tables not properly " + + "initialized, no record found in next_txn_id"); + } + long txnHwm = rs.getLong(1); + if (rs.wasNull()) { + throw new MetaException("Transaction tables not properly " + + "initialized, null record found in next_txn_id"); + } + close(rs); + List openList = new ArrayList<>(); + List abortedList = new ArrayList<>(); + //need the WHERE clause below to ensure consistent results with READ_COMMITTED + s = "select txn_id, txn_state from TXNS where txn_id <= " + txnHwm + " order by txn_id"; + LOG.debug("Going to execute query<" + s + ">"); + rs = stmt.executeQuery(s); + while (rs.next()) { + long txnId = rs.getLong(1); + char c = rs.getString(2).charAt(0); + if (c == TXN_OPEN) { + openList.add(txnId); + } else if (c == TXN_ABORTED) { + abortedList.add(txnId); + } + } + long currentTxn = rqst.getCurrentTxnId(); + List openWriteIdsList = new ArrayList<>(); + for (String table : rqst.getTableNames()) { + OpenWriteIds writeIds = getOpenWriteIdsForTable(stmt, table, currentTxn, + txnHwm, openList, abortedList); + openWriteIdsList.add(writeIds); + } + + LOG.debug("Going to rollback"); + dbConn.rollback(); + GetOpenWriteIdsResponse owr = new GetOpenWriteIdsResponse(openWriteIdsList); + return owr; + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "getOpenWriteIds"); + throw new MetaException("Unable to select from transaction database, " + + StringUtils.stringifyException(e)); + } finally { + close(rs, stmt, dbConn); + } + } catch (RetryException e) { + return getOpenWriteIds(rqst); + } + } + + private OpenWriteIds getOpenWriteIdsForTable(Statement stmt, String tableName, long currentTxn, + long txnHwm, ListopenList, ListabortedList) throws SQLException { + ResultSet rs = null; + try { + // Need to initialize to 0 to make sure if nobody modified this table, then current txn + // shouldn't read any data + long writeIdHwm = 0; + List openWriteIdList = new ArrayList<>(); + + // The output includes all the txns which are under the high water mark. It includes + // the committed transactions as well. + String s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm + + " and t2w_table = " + quoteString(tableName) + " order by t2w_writeid"; + LOG.debug("Going to execute query<" + s + ">"); + rs = stmt.executeQuery(s); + long minOpenWriteId = Long.MAX_VALUE; + BitSet abortedBits = new BitSet(); + while (rs.next()) { + long txnId = rs.getLong(1); + long writeId = rs.getLong(2); + writeIdHwm = Math.max(writeIdHwm, writeId); + + // Skip the current transaction from the open list as the write ID corresponding to this + // should be valid within current transaction + if (txnId == currentTxn) { + continue; + } + if (openList.contains(txnId)) { + openWriteIdList.add(writeId); + minOpenWriteId = Math.min(minOpenWriteId, writeId); + } else if (abortedList.contains(txnId)) { + openWriteIdList.add(writeId); + abortedBits.set(openWriteIdList.size() - 1); + } + + // Skip of the transaction under evaluation is already committed. 
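// Worked example (illustrative only, not part of the patch):
//   Suppose txnHwm = 10, open txns = {7, 9}, aborted txns = {5}, currentTxn = 9, and
//   TXN_TO_WRITE_ID holds (txn 3 -> writeId 1), (txn 5 -> writeId 2), (txn 7 -> writeId 3),
//   (txn 9 -> writeId 4) for this table. Then writeIdHwm ends up as 4, writeId 1 is dropped
//   (its txn committed), writeId 4 is skipped (it belongs to currentTxn), openWriteIdList
//   becomes [2, 3] with abortedBits bit 0 set for writeId 2, and minOpenWriteId = 3 is
//   reported as minWriteId.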
+ } + + ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray()); + OpenWriteIds owi = new OpenWriteIds(tableName, writeIdHwm, openWriteIdList, byteBuffer); + if (minOpenWriteId < Long.MAX_VALUE) { + owi.setMinWriteId(minOpenWriteId); + } + return owi; + } finally { + close(rs); + } + } + + @Override + public void addTransactionalTable(AddTransactionalTableRequest rqst) throws MetaException { + String fullTableName = TxnUtils.getFullTableName(rqst.getDbName(), rqst.getTableName()); + + try { + Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + try { + lockInternal(); + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + + // Check if any entry already exist for the given table name, + // If yes, then reset next ID to 1 else create a new entry with initial value as 1 + String s = sqlGenerator.addForUpdateClause( + "select nwi_next from NEXT_WRITE_ID where nwi_table = " + quoteString(fullTableName)); + + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (rs.next()) { + s = "update NEXT_WRITE_ID set nwi_next = 1 where nwi_table = " + quoteString(fullTableName); + LOG.debug("Going to execute update <" + s + ">"); + stmt.executeUpdate(s); + } else { + s = "insert into NEXT_WRITE_ID (nwi_table, nwi_next) values (" + quoteString(fullTableName) + ", 1)"; + LOG.debug("Going to execute insert <" + s + ">"); + stmt.execute(s); + } + + LOG.debug("Going to commit"); + dbConn.commit(); + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "addTransactionalTable(" + rqst + ")"); + throw new MetaException("Unable to update transaction database " + + StringUtils.stringifyException(e)); + } finally { + close(rs, stmt, dbConn); + unlockInternal(); + } + } catch (RetryException e) { + addTransactionalTable(rqst); + } + } + + @Override + public AllocateTableWriteIdResponse allocateTableWriteId(AllocateTableWriteIdRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException { + List txnIds = rqst.getTxnIds(); + String fullTableName = TxnUtils.getFullTableName(rqst.getDbName(), rqst.getTableName()); + try { + Connection dbConn = null; + Statement stmt = null; + ResultSet rs = null; + try { + lockInternal(); + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + stmt = dbConn.createStatement(); + List txnToWriteIds = new ArrayList<>(); + List newAllocTxns = new ArrayList<>(); + String s; + long writeId; + + for (long txnId : txnIds) { + // Validate the transaction's state. 
Write ID should be allocated only for open transactions + TxnStatus txnStatus = findTxnState(txnId, stmt); + if (txnStatus != TxnStatus.OPEN) { + raiseTxnUnexpectedState(txnStatus, txnId); + shouldNeverHappen(txnId); + //dbConn is rolled back in finally{} + } + + // If table write ID is already allocated for the current transaction, then just return it + // else allocate it + s = "select t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid = " + txnId + + " and t2w_table = " + quoteString(fullTableName); + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (rs.next()) { + writeId = rs.getLong(1); + txnToWriteIds.add(new TxnToWriteId(txnId, writeId)); + } else { + newAllocTxns.add(txnId); + } + } + + // If all the txns in the list have already allocated write ids, then just skip new allocations + long numOfWriteIds = newAllocTxns.size(); + if (0 == numOfWriteIds) { + return new AllocateTableWriteIdResponse(txnToWriteIds); + } + + // Get the next write ID for the given table and increment it + s = sqlGenerator.addForUpdateClause( + "select nwi_next from NEXT_WRITE_ID where nwi_table = " + quoteString(fullTableName)); + LOG.debug("Going to execute query <" + s + ">"); + rs = stmt.executeQuery(s); + if (!rs.next()) { + // First allocation of write id should add the table to the next_write_id meta table + s = "insert into NEXT_WRITE_ID (nwi_table, nwi_next) values (" + + quoteString(fullTableName) + "," + String.valueOf(numOfWriteIds + 1) + ")"; + LOG.debug("Going to execute insert <" + s + ">"); + stmt.execute(s); + writeId = 1; + } else { + writeId = rs.getLong(1); + s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds) + + " where nwi_table = " + quoteString(fullTableName); + LOG.debug("Going to execute update <" + s + ">"); + stmt.executeUpdate(s); + } + + for (long txnId : newAllocTxns) { + s = "insert into TXN_TO_WRITE_ID (t2w_txnid, t2w_table, t2w_writeid) values (" + + txnId + ", " + quoteString(fullTableName) + ", " + writeId + ")"; + LOG.debug("Going to execute insert <" + s + ">"); + stmt.execute(s); + txnToWriteIds.add(new TxnToWriteId(txnId, writeId++)); + } + + LOG.debug("Going to commit"); + dbConn.commit(); + return new AllocateTableWriteIdResponse(txnToWriteIds); + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "allocateTableWriteId(" + rqst + ")"); + throw new MetaException("Unable to update transaction database " + + StringUtils.stringifyException(e)); + } finally { + close(rs, stmt, dbConn); + unlockInternal(); + } + } catch (RetryException e) { + return allocateTableWriteId(rqst); + } + } + + @Override @RetrySemantics.SafeToRetry public void performWriteSetGC() { Connection dbConn = null; diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 42f90cd..5ebd790 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.api.*; import java.sql.SQLException; +import java.util.IllegalFormatCodePointException; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -141,6 +142,31 @@ public BasicTxnInfo getLastCompletedTransactionForTable( public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit( String inputDbName, 
String inputTableName, long id) throws MetaException; + /** + * Gets the list of write ids which are open/aborted + * @param rqst info on transaction and list of table names associated with given transaction + * @throws NoSuchTxnException + * @throws MetaException + */ + @RetrySemantics.ReadOnly + GetOpenWriteIdsResponse getOpenWriteIds(GetOpenWriteIdsRequest rqst) + throws NoSuchTxnException, MetaException; + /** + * Create metadata for transactional table which is newly added + * @param rqst info on transactional table for which need to add metadata + * @throws MetaException + */ + void addTransactionalTable(AddTransactionalTableRequest rqst) throws MetaException; + + /** + * Allocate a write ID for the given table and associate it with a transaction + * @param rqst info on transaction and table to allocate write id + * @throws NoSuchTxnException + * @throws TxnAbortedException + * @throws MetaException + */ + AllocateTableWriteIdResponse allocateTableWriteId(AllocateTableWriteIdRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException; /** * Obtain a lock. @@ -368,10 +394,10 @@ void cleanupRecords(HiveObjectType type, Database db, Table table, List findColumnsWithStats(CompactionInfo ci) throws MetaException; /** - * Record the highest txn id that the {@code ci} compaction job will pay attention to. + * Record the highest write id that the {@code ci} compaction job will pay attention to. */ @RetrySemantics.Idempotent - void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException; + void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException; /** * For any given compactable entity (partition, table if not partitioned) the history of compactions diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index 027fb3f..44c2282 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -18,16 +18,14 @@ package org.apache.hadoop.hive.metastore.txn; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.ValidCompactorTxnList; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TxnInfo; -import org.apache.hadoop.hive.metastore.api.TxnState; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -63,53 +61,84 @@ public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long BitSet abortedBits = BitSet.valueOf(txns.getAbortedBits()); long[] exceptions = new long[open.size() 
- (currentTxn > 0 ? 1 : 0)]; int i = 0; - for(long txn: open) { + for (long txn : open) { if (currentTxn > 0 && currentTxn == txn) continue; exceptions[i++] = txn; } - if(txns.isSetMin_open_txn()) { + if (txns.isSetMin_open_txn()) { return new ValidReadTxnList(exceptions, abortedBits, highWater, txns.getMin_open_txn()); - } - else { + } else { return new ValidReadTxnList(exceptions, abortedBits, highWater); } } /** - * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse} to a - * {@link org.apache.hadoop.hive.common.ValidTxnList}. This assumes that the caller intends to + * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenWriteIdsResponse} to a + * {@link org.apache.hadoop.hive.common.ValidTxnWriteIdList}. This assumes that the caller intends to + * read the files, and thus treats both open and aborted transactions as invalid. + * @param writeIds write ids list from the metastore + * @return a valid write IDs list for the whole transaction. + */ + public static ValidTxnWriteIdList createValidTxnWriteIdList(GetOpenWriteIdsResponse writeIds) { + ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(); + for (OpenWriteIds tableWriteIds : writeIds.getOpenWriteIds()) { + validTxnWriteIdList.addTableWriteId(createValidReaderWriteIdList(tableWriteIds)); + } + return validTxnWriteIdList; + } + + public static ValidReaderWriteIdList createValidReaderWriteIdList(OpenWriteIds tableWriteIds) { + String tableName = tableWriteIds.getTableName(); + long highWater = tableWriteIds.getWriteIdHighWaterMark(); + List open = tableWriteIds.getOpenWriteIds(); + BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits()); + long[] exceptions = new long[open.size()]; + int i = 0; + for (long writeId : open) { + exceptions[i++] = writeId; + } + if (tableWriteIds.isSetMinWriteId()) { + return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, highWater, tableWriteIds.getMinWriteId()); + } else { + return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, highWater); + } + } + + /** + * Transform a {@link org.apache.hadoop.hive.metastore.api.OpenWriteIds} to a + * {@link org.apache.hadoop.hive.common.ValidWriteIdList}. This assumes that the caller intends to * compact the files, and thus treats only open transactions as invalid. Additionally any - * txnId > highestOpenTxnId is also invalid. This is to avoid creating something like - * delta_17_120 where txnId 80, for example, is still open. - * @param txns txn list from the metastore - * @return a valid txn list. + * writeId > highestOpenWriteId is also invalid. This is to avoid creating something like + * delta_17_120 where writeId 80, for example, is still open. + * @param tableWriteIds table write id list from the metastore + * @return a valid write id list. 
*/ - public static ValidTxnList createValidCompactTxnList(GetOpenTxnsInfoResponse txns) { - //highWater is the last txn id that has been allocated - long highWater = txns.getTxn_high_water_mark(); - long minOpenTxn = Long.MAX_VALUE; - long[] exceptions = new long[txns.getOpen_txnsSize()]; + public static ValidWriteIdList createValidCompactWriteIdList(OpenWriteIds tableWriteIds) { + String tableName = tableWriteIds.getTableName(); + long highWater = tableWriteIds.getWriteIdHighWaterMark(); + long minOpenWriteId = Long.MAX_VALUE; + List open = tableWriteIds.getOpenWriteIds(); + BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits()); + long[] exceptions = new long[open.size()]; int i = 0; - for (TxnInfo txn : txns.getOpen_txns()) { - if (txn.getState() == TxnState.OPEN) { - minOpenTxn = Math.min(minOpenTxn, txn.getId()); - } - else { - //only need aborted since we don't consider anything above minOpenTxn - exceptions[i++] = txn.getId(); + for (long writeId : open) { + if (abortedBits.get(i)) { + // Only need aborted since we don't consider anything above minOpenWriteId + exceptions[i++] = writeId; + } else { + minOpenWriteId = Math.min(minOpenWriteId, writeId); } } if(i < exceptions.length) { exceptions = Arrays.copyOf(exceptions, i); } - highWater = minOpenTxn == Long.MAX_VALUE ? highWater : minOpenTxn - 1; + highWater = minOpenWriteId == Long.MAX_VALUE ? highWater : minOpenWriteId - 1; BitSet bitSet = new BitSet(exceptions.length); - bitSet.set(0, exceptions.length); // for ValidCompactorTxnList, everything in exceptions are aborted - if(minOpenTxn == Long.MAX_VALUE) { - return new ValidCompactorTxnList(exceptions, bitSet, highWater); - } - else { - return new ValidCompactorTxnList(exceptions, bitSet, highWater, minOpenTxn); + bitSet.set(0, exceptions.length); // for ValidCompactorWriteIdList, everything in exceptions are aborted + if (minOpenWriteId == Long.MAX_VALUE) { + return new ValidCompactorWriteIdList(tableName, exceptions, bitSet, highWater); + } else { + return new ValidCompactorWriteIdList(tableName, exceptions, bitSet, highWater, minOpenWriteId); } } @@ -157,6 +186,14 @@ public static boolean isAcidTable(Table table) { } /** + * Should produce the same result as + * {@link org.apache.hadoop.hive.ql.io.AcidUtils#getFullTableName(String, String)} + */ + public static String getFullTableName(String dbName, String tableName) { + return dbName.toLowerCase() + "." + tableName.toLowerCase(); + } + + /** * Build a query (or queries if one query is too big but only for the case of 'IN' * composite clause. For the case of 'NOT IN' clauses, multiple queries change * the semantics of the intended query. 
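Editorial note, not part of the patch: the snippet below is a minimal standalone sketch of the high-water-mark collapse that the new TxnUtils.createValidCompactWriteIdList performs above. The class name and the sample write ids (7 open, 9 and 10 aborted, table high watermark 12) are made up for illustration; the point is that the compactor's effective high watermark drops to (smallest open write id - 1) while the aborted write ids are kept so aborted records can still be skipped.
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class CompactorSnapshotSketch {
  public static void main(String[] args) {
    long highWater = 12L;                    // writeIdHighWaterMark reported for the table
    long[] invalidWriteIds = {7L, 9L, 10L};  // open + aborted write ids, sorted
    BitSet abortedBits = new BitSet();       // bit i set => invalidWriteIds[i] maps to an aborted txn
    abortedBits.set(1);                      // 9 aborted
    abortedBits.set(2);                      // 10 aborted

    long minOpenWriteId = Long.MAX_VALUE;
    List<Long> abortedKept = new ArrayList<>();
    for (int i = 0; i < invalidWriteIds.length; i++) {
      if (abortedBits.get(i)) {
        abortedKept.add(invalidWriteIds[i]); // compactor still has to ignore aborted records
      } else {
        minOpenWriteId = Math.min(minOpenWriteId, invalidWriteIds[i]);
      }
    }
    // Nothing at or above the smallest open write id may be compacted.
    long compactorHighWater = (minOpenWriteId == Long.MAX_VALUE) ? highWater : minOpenWriteId - 1;

    System.out.println("compactor high watermark = " + compactorHighWater); // 6
    System.out.println("aborted write ids kept   = " + abortedKept);        // [9, 10]
  }
}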
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift index 93f3e53..4e53ddc 100644 --- a/standalone-metastore/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -731,6 +731,43 @@ struct CommitTxnRequest { 1: required i64 txnid, } +struct GetOpenWriteIdsRequest { + 1: required i64 currentTxnId, + 2: required list<string> tableNames, +} + +struct OpenWriteIds { + 1: required string tableName, + 2: required i64 writeIdHighWaterMark, + 3: required list<i64> openWriteIds, + 4: optional i64 minWriteId, + 5: required binary abortedBits, +} + +struct GetOpenWriteIdsResponse { + 1: required list<OpenWriteIds> openWriteIds, +} + +struct AddTransactionalTableRequest { + 1: required string dbName, + 2: required string tableName, +} + +struct AllocateTableWriteIdRequest { + 1: required list<i64> txnIds, + 2: required string dbName, + 3: required string tableName, +} + +struct TxnToWriteId { + 1: required i64 txnId, + 2: required i64 writeId, +} + +struct AllocateTableWriteIdResponse { + 1: required list<TxnToWriteId> txnToWriteIds, +} + struct LockComponent { 1: required LockType type, 2: required LockLevel level, @@ -1804,6 +1841,11 @@ service ThriftHiveMetastore extends fb303.FacebookService void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1) void abort_txns(1:AbortTxnsRequest rqst) throws (1:NoSuchTxnException o1) void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2) + GetOpenWriteIdsResponse get_open_write_ids(1:GetOpenWriteIdsRequest rqst) + throws (1:NoSuchTxnException o1, 2:MetaException o2) + void add_transactional_table(1:AddTransactionalTableRequest rqst) throws (1:MetaException o1) + AllocateTableWriteIdResponse allocate_table_write_id(1:AllocateTableWriteIdRequest rqst) + throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:MetaException o3) LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2) LockResponse check_lock(1:CheckLockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3) diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java similarity index 53% rename from storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java rename to storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java index 94b8c58..1fcfc6a 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorWriteIdList.java @@ -22,35 +22,38 @@ import java.util.BitSet; /** - * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by the compactor. - * + * An implementation of {@link ValidWriteIdList} for use by the compactor. + * + * Compaction should only include txns up to smallest open txn (exclussive). - * There may be aborted txns in the snapshot represented by this ValidCompactorTxnList. - * Thus {@link #isTxnRangeValid(long, long)} returns NONE for any range that inluces any unresolved - * transactions. Any txn above {@code highWatermark} is unresolved. + * There may be aborted write ids in the snapshot represented by this ValidCompactorWriteIdList. + * Thus {@link #isWriteIdRangeValid(long, long)} returns NONE for any range that includes any unresolved + * write ids.
Any write id above {@code highWatermark} is unresolved. * These produce the logic we need to assure that the compactor only sees records less than the lowest - * open transaction when choosing which files to compact, but that it still ignores aborted + * open write ids when choosing which files to compact, but that it still ignores aborted * records when compacting. - * + * * See org.apache.hadoop.hive.metastore.txn.TxnUtils#createValidCompactTxnList() for proper * way to construct this. */ -public class ValidCompactorTxnList extends ValidReadTxnList { - public ValidCompactorTxnList() { +public class ValidCompactorWriteIdList extends ValidReaderWriteIdList { + public ValidCompactorWriteIdList() { super(); } - public ValidCompactorTxnList(long[] abortedTxnList, BitSet abortedBits, long highWatermark) { - this(abortedTxnList, abortedBits, highWatermark, Long.MAX_VALUE); + public ValidCompactorWriteIdList(String tableName, long[] abortedWriteIdList, BitSet abortedBits, long highWatermark) { + this(tableName, abortedWriteIdList, abortedBits, highWatermark, Long.MAX_VALUE); } /** - * @param abortedTxnList list of all aborted transactions + * @param tableName table which is under compaction. Full name of format . + * @param abortedWriteIdList list of all aborted write ids * @param abortedBits bitset marking whether the corresponding transaction is aborted - * @param highWatermark highest committed transaction to be considered for compaction, - * equivalently (lowest_open_txn - 1). + * @param highWatermark highest committed write id to be considered for compaction, + * equivalently (lowest_open_write_id - 1). + * @param minOpenWriteId minimum write ID which maps to a open transaction */ - public ValidCompactorTxnList(long[] abortedTxnList, BitSet abortedBits, long highWatermark, long minOpenTxnId) { + public ValidCompactorWriteIdList(String tableName, + long[] abortedWriteIdList, BitSet abortedBits, long highWatermark, long minOpenWriteId) { // abortedBits should be all true as everything in exceptions are aborted txns - super(abortedTxnList, abortedBits, highWatermark, minOpenTxnId); + super(tableName, abortedWriteIdList, abortedBits, highWatermark, minOpenWriteId); if(this.exceptions.length <= 0) { return; } @@ -66,24 +69,24 @@ public ValidCompactorTxnList(long[] abortedTxnList, BitSet abortedBits, long hig } /* * ensure that we throw out any exceptions above highWatermark to make - * {@link #isTxnValid(long)} faster + * {@link #isWriteIdValid(long)} faster */ this.exceptions = Arrays.copyOf(this.exceptions, lastElementPos + 1); } - public ValidCompactorTxnList(String value) { + public ValidCompactorWriteIdList(String value) { super(value); } /** - * Returns org.apache.hadoop.hive.common.ValidTxnList.RangeResponse.ALL if all txns in + * Returns org.apache.hadoop.hive.common.ValidWriteIdList.RangeResponse.ALL if all write ids in * the range are resolved and RangeResponse.NONE otherwise */ @Override - public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) { - return highWatermark >= maxTxnId ? RangeResponse.ALL : RangeResponse.NONE; + public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) { + return highWatermark >= maxWriteId ? 
RangeResponse.ALL : RangeResponse.NONE; } @Override - public boolean isTxnAborted(long txnid) { - return Arrays.binarySearch(exceptions, txnid) >= 0; + public boolean isWriteIdAborted(long writeId) { + return Arrays.binarySearch(exceptions, writeId) >= 0; } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java index ccdd4b7..1433e8e 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java @@ -65,14 +65,6 @@ public boolean isTxnValid(long txnid) { return Arrays.binarySearch(exceptions, txnid) < 0; } - /** - * We cannot use a base file if its range contains an open txn. - * @param txnid from base_xxxx - */ - @Override - public boolean isValidBase(long txnid) { - return minOpenTxn > txnid && txnid <= highWatermark; - } @Override public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) { // check the easy cases first diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java new file mode 100644 index 0000000..9e1fcc3 --- /dev/null +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.common; + +import java.util.Arrays; +import java.util.BitSet; + +/** + * An implementation of {@link ValidWriteIdList} for use by readers. + * This class will view a write id as valid only if it maps to a committed transaction. + * Write ids of both open and aborted transactions will be seen as invalid. + */ +public class ValidReaderWriteIdList implements ValidWriteIdList { + + protected String tableName; // Full table name of format <db_name>.<table_name> + protected long[] exceptions; + protected BitSet abortedBits; // BitSet for flagging aborted write ids.
Bit is true if aborted, false if open + //default value means there are no open write ids in the snapshot + private long minOpenWriteId = Long.MAX_VALUE; + protected long highWatermark; + + public ValidReaderWriteIdList() { + this(null, new long[0], new BitSet(), Long.MAX_VALUE, Long.MAX_VALUE); + } + + /** + * Used if there are no open write ids in the snapshot + */ + public ValidReaderWriteIdList(String tableName, long[] exceptions, BitSet abortedBits, long highWatermark) { + this(tableName, exceptions, abortedBits, highWatermark, Long.MAX_VALUE); + } + public ValidReaderWriteIdList(String tableName, + long[] exceptions, BitSet abortedBits, long highWatermark, long minOpenWriteId) { + this.tableName = tableName; + if (exceptions.length > 0) { + this.minOpenWriteId = minOpenWriteId; + } + this.exceptions = exceptions; + this.abortedBits = abortedBits; + this.highWatermark = highWatermark; + } + + public ValidReaderWriteIdList(String value) { + readFromString(value); + } + + @Override + public boolean isWriteIdValid(long writeId) { + if (highWatermark < writeId) { + return false; + } + return Arrays.binarySearch(exceptions, writeId) < 0; + } + + /** + * We cannot use a base file if its range contains an open write id. + * @param writeId from base_xxxx + */ + @Override + public boolean isValidBase(long writeId) { + return minOpenWriteId > writeId && writeId <= highWatermark; + } + @Override + public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) { + // check the easy cases first + if (highWatermark < minWriteId) { + return RangeResponse.NONE; + } else if (exceptions.length > 0 && exceptions[0] > maxWriteId) { + return RangeResponse.ALL; + } + + // since the exceptions and the range in question overlap, count the + // exceptions in the range + long count = Math.max(0, maxWriteId - highWatermark); + for(long txn: exceptions) { + if (minWriteId <= txn && txn <= maxWriteId) { + count += 1; + } + } + + if (count == 0) { + return RangeResponse.ALL; + } else if (count == (maxWriteId - minWriteId + 1)) { + return RangeResponse.NONE; + } else { + return RangeResponse.SOME; + } + } + + @Override + public String toString() { + return writeToString(); + } + + // Format is :::: + @Override + public String writeToString() { + StringBuilder buf = new StringBuilder(); + if (tableName == null) { + buf.append("null"); + } else { + buf.append(tableName); + } + buf.append(':'); + buf.append(highWatermark); + buf.append(':'); + buf.append(minOpenWriteId); + if (exceptions.length == 0) { + buf.append(':'); // separator for open write ids + buf.append(':'); // separator for aborted write ids + } else { + StringBuilder open = new StringBuilder(); + StringBuilder abort = new StringBuilder(); + for (int i = 0; i < exceptions.length; i++) { + if (abortedBits.get(i)) { + if (abort.length() > 0) { + abort.append(','); + } + abort.append(exceptions[i]); + } else { + if (open.length() > 0) { + open.append(','); + } + open.append(exceptions[i]); + } + } + buf.append(':'); + buf.append(open); + buf.append(':'); + buf.append(abort); + } + return buf.toString(); + } + + @Override + public void readFromString(String src) { + if (src == null || src.length() == 0) { + highWatermark = Long.MAX_VALUE; + exceptions = new long[0]; + abortedBits = new BitSet(); + } else { + String[] values = src.split(":"); + tableName = values[0]; + if (tableName.equalsIgnoreCase("null")) { + tableName = null; + } + highWatermark = Long.parseLong(values[1]); + minOpenWriteId = Long.parseLong(values[2]); + String[] openWriteIds 
= new String[0]; + String[] abortedWriteIds = new String[0]; + if (values.length < 4) { + openWriteIds = new String[0]; + abortedWriteIds = new String[0]; + } else if (values.length == 4) { + if (!values[3].isEmpty()) { + openWriteIds = values[3].split(","); + } + } else { + if (!values[3].isEmpty()) { + openWriteIds = values[3].split(","); + } + if (!values[4].isEmpty()) { + abortedWriteIds = values[4].split(","); + } + } + exceptions = new long[openWriteIds.length + abortedWriteIds.length]; + int i = 0; + for (String open : openWriteIds) { + exceptions[i++] = Long.parseLong(open); + } + for (String abort : abortedWriteIds) { + exceptions[i++] = Long.parseLong(abort); + } + Arrays.sort(exceptions); + abortedBits = new BitSet(exceptions.length); + for (String abort : abortedWriteIds) { + int index = Arrays.binarySearch(exceptions, Long.parseLong(abort)); + abortedBits.set(index); + } + } + } + + @Override + public String getTableName() { + return tableName; + } + + @Override + public long getHighWatermark() { + return highWatermark; + } + + @Override + public long[] getInvalidWriteIds() { + return exceptions; + } + + @Override + public Long getMinOpenWriteId() { + return minOpenWriteId == Long.MAX_VALUE ? null : minOpenWriteId; + } + + @Override + public boolean isWriteIdAborted(long writeId) { + int index = Arrays.binarySearch(exceptions, writeId); + return index >= 0 && abortedBits.get(index); + } + + @Override + public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { + // check the easy cases first + if (highWatermark < minWriteId) { + return RangeResponse.NONE; + } + + int count = 0; // number of aborted txns found in exceptions + + // traverse the aborted txns list, starting at first aborted txn index + for (int i = abortedBits.nextSetBit(0); i >= 0; i = abortedBits.nextSetBit(i + 1)) { + long abortedTxnId = exceptions[i]; + if (abortedTxnId > maxWriteId) { // we've already gone beyond the specified range + break; + } + if (abortedTxnId >= minWriteId && abortedTxnId <= maxWriteId) { + count++; + } + } + + if (count == 0) { + return RangeResponse.NONE; + } else if (count == (maxWriteId - minWriteId + 1)) { + return RangeResponse.ALL; + } else { + return RangeResponse.SOME; + } + } +} + diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java index 3ffb2d8..d4c3b09 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java @@ -47,13 +47,6 @@ public boolean isTxnValid(long txnid); /** - * Returns {@code true} if such base file can be used to materialize the snapshot represented by - * this {@code ValidTxnList}. - * @param txnid highest txn in a given base_xxxx file - */ - public boolean isValidBase(long txnid); - - /** * Find out if a range of transaction ids are valid. Note that valid may have different meanings * for different implementations, as some will only want to see committed transactions and some * both committed and aborted. diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java new file mode 100644 index 0000000..5e26a6b --- /dev/null +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.common; + +import java.util.HashMap; + +/** + * An implementation to store and manage list of ValidWriteIds for each tables read by current + * transaction + */ +public class ValidTxnWriteIdList { + /** + * Key used to store valid write id list for all the operated tables in a + * {@link org.apache.hadoop.conf.Configuration} object. + */ + public static final String VALID_TABLES_WRITEIDS_KEY = "hive.txn.tables.valid.writeids"; + + private HashMap validTablesWriteIdList = new HashMap<>(); + public ValidTxnWriteIdList() { + } + + public ValidTxnWriteIdList(String value) { + readFromString(value); + } + + @Override + public String toString() { + return writeToString(); + } + + public void addTableWriteId(ValidWriteIdList validWriteIds) { + validTablesWriteIdList.put(validWriteIds.getTableName(), validWriteIds); + } + + public ValidWriteIdList getTableWriteIdList(String tableName) { + if (validTablesWriteIdList.containsKey(tableName)) { + return validTablesWriteIdList.get(tableName); + } else { + return new ValidReaderWriteIdList(); + } + } + + public int getNumOfTables() { + return validTablesWriteIdList.size(); + } + + private void readFromString(String src) { + if ((src == null) || (src.length() == 0)) { + return; + } + String[] tblWriteIdStrList = src.split("\\$"); + for (String tableStr : tblWriteIdStrList) { + ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(tableStr); + addTableWriteId(validWriteIdList); + } + } + + private String writeToString() { + StringBuilder buf = new StringBuilder(); + int index = 0; + for (HashMap.Entry entry : validTablesWriteIdList.entrySet()) { + buf.append(entry.getValue().writeToString()); + + // Separator for multiple tables' ValidWriteIdList. Also, skip it for last entry. + index++; + if (index < validTablesWriteIdList.size()) { + buf.append('$'); + } + } + return buf.toString(); + } +} diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java new file mode 100644 index 0000000..d0d1726 --- /dev/null +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.common; + +/** + * Models the list of write ids that should be included in a snapshot. + * It is modelled as a high water mark, which is the largest write id that + * has been committed, and a list of write ids that are not included. + */ +public interface ValidWriteIdList { + + /** + * Key used to store valid write id list in a + * {@link org.apache.hadoop.conf.Configuration} object. + */ + public static final String VALID_WRITEIDS_KEY = "hive.txn.valid.writeids"; + + /** + * The response to a range query. NONE means no values in this range match, + * SOME means that some do, and ALL means that every value does. + */ + public enum RangeResponse {NONE, SOME, ALL}; + + /** + * Indicates whether a given write ID is valid. Note that valid may have different meanings + * for different implementations, as some will only want to see committed transactions and some + * both committed and aborted. + * @param writeId write ID of the table + * @return true if valid, false otherwise + */ + public boolean isWriteIdValid(long writeId); + + /** + * Returns {@code true} if such base file can be used to materialize the snapshot represented by + * this {@code ValidWriteIdList}. + * @param writeId highest write ID in a given base_xxxx file + */ + public boolean isValidBase(long writeId); + + /** + * Find out if a range of write ids are valid. Note that valid may have different meanings + * for different implementations, as some will only want to see committed transactions and some + * both committed and aborted. + * @param minWriteId minimum write ID to look for, inclusive + * @param maxWriteId maximum write ID to look for, inclusive + * @return Indicates whether none, some, or all of these write ids are valid. + */ + public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId); + + /** + * Write this ValidWriteIdList into a string. This should produce a string that + * can be used by {@link #readFromString(String)} to populate a ValidWriteIdList. + */ + public String writeToString(); + + /** + * Populate this ValidWriteIdList from the string. It is assumed that the string + * was created via {@link #writeToString()} and the exceptions list is sorted. + * @param src source string. + */ + public void readFromString(String src); + + /** + * Get the table for which the ValidWriteIdList is formed + * @return table name (<db_name>.<table_name>) associated with ValidWriteIdList. + */ + public String getTableName(); + + /** + * Get the largest write id used. + * @return largest write id used + */ + public long getHighWatermark(); + + /** + * Get the list of write ids under the high water mark that are not valid. Note that invalid + * may have different meanings for different implementations, as some will only want to see open + * transactions and some both open and aborted. + * @return a list of invalid write ids + */ + public long[] getInvalidWriteIds(); + + /** + * Indicates whether a given write id maps to an aborted transaction.
+ * @param writeId write id to be validated + * @return true if aborted, false otherwise + */ + public boolean isWriteIdAborted(long writeId); + + /** + * Find out if a range of write ids are aborted. + * @param minWriteId minimum write Id to look for, inclusive + * @param maxWriteId maximum write Id to look for, inclusive + * @return Indicate whether none, some, or all of these write ids are aborted. + */ + public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId); + + /** + * Returns smallest Open write Id in this set, {@code null} if there is none. + */ + Long getMinOpenWriteId(); +} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java deleted file mode 100644 index 867b652..0000000 --- a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.common; - -import org.junit.Assert; -import org.junit.Test; - -import java.util.BitSet; - -public class TestValidCompactorTxnList { - - @Test - public void minTxnHigh() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 4}, bitSet, 2); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void maxTxnLow() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{13, 14}, bitSet, 12); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - } - - @Test - public void minTxnHighNoExceptions() { - ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 5); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void maxTxnLowNoExceptions() { - ValidTxnList txns = new ValidCompactorTxnList(new long[0], new BitSet(), 15); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - } - - @Test - public void exceptionsAllBelow() { - BitSet bitSet = new BitSet(2); - bitSet.set(0, 2); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{3, 6}, bitSet, 3); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void exceptionsInMidst() { - BitSet bitSet = new BitSet(1); - bitSet.set(0, 1); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{8}, bitSet, 7); - 
ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - @Test - public void exceptionsAbveHighWaterMark() { - BitSet bitSet = new BitSet(4); - bitSet.set(0, 4); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{8, 11, 17, 29}, bitSet, 15); - Assert.assertArrayEquals("", new long[]{8, 11}, txns.getInvalidTransactions()); - ValidTxnList.RangeResponse rsp = txns.isTxnRangeValid(7, 9); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - rsp = txns.isTxnRangeValid(12, 16); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - } - - @Test - public void writeToString() { - BitSet bitSet = new BitSet(4); - bitSet.set(0, 4); - ValidTxnList txns = new ValidCompactorTxnList(new long[]{7, 9, 10, Long.MAX_VALUE}, bitSet, 8); - Assert.assertEquals("8:" + Long.MAX_VALUE + "::7", txns.writeToString()); - txns = new ValidCompactorTxnList(); - Assert.assertEquals(Long.toString(Long.MAX_VALUE) + ":" + Long.MAX_VALUE + "::", txns.writeToString()); - txns = new ValidCompactorTxnList(new long[0], new BitSet(), 23); - Assert.assertEquals("23:" + Long.MAX_VALUE + "::", txns.writeToString()); - } - - @Test - public void readFromString() { - ValidCompactorTxnList txns = new ValidCompactorTxnList("37:" + Long.MAX_VALUE + "::7,9,10"); - Assert.assertEquals(37L, txns.getHighWatermark()); - Assert.assertNull(txns.getMinOpenTxn()); - Assert.assertArrayEquals(new long[]{7L, 9L, 10L}, txns.getInvalidTransactions()); - txns = new ValidCompactorTxnList("21:" + Long.MAX_VALUE + ":"); - Assert.assertEquals(21L, txns.getHighWatermark()); - Assert.assertNull(txns.getMinOpenTxn()); - Assert.assertEquals(0, txns.getInvalidTransactions().length); - } - - @Test - public void testAbortedTxn() throws Exception { - ValidCompactorTxnList txnList = new ValidCompactorTxnList("5:4::1,2,3"); - Assert.assertEquals(5L, txnList.getHighWatermark()); - Assert.assertEquals(4, txnList.getMinOpenTxn().longValue()); - Assert.assertArrayEquals(new long[]{1L, 2L, 3L}, txnList.getInvalidTransactions()); - } - - @Test - public void testAbortedRange() throws Exception { - ValidCompactorTxnList txnList = new ValidCompactorTxnList("11:4::5,6,7,8"); - ValidTxnList.RangeResponse rsp = txnList.isTxnRangeAborted(1L, 3L); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - rsp = txnList.isTxnRangeAborted(9L, 10L); - Assert.assertEquals(ValidTxnList.RangeResponse.NONE, rsp); - rsp = txnList.isTxnRangeAborted(6L, 7L); - Assert.assertEquals(ValidTxnList.RangeResponse.ALL, rsp); - rsp = txnList.isTxnRangeAborted(4L, 6L); - Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp); - rsp = txnList.isTxnRangeAborted(6L, 13L); - Assert.assertEquals(ValidTxnList.RangeResponse.SOME, rsp); - } -} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java new file mode 100644 index 0000000..9530edb --- /dev/null +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorWriteIdList.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.common; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.BitSet; + +public class TestValidCompactorWriteIdList { + private final String tableName = "t1"; + + @Test + public void minTxnHigh() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{3, 4}, bitSet, 2); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void maxTxnLow() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{13, 14}, bitSet, 12); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + } + + @Test + public void minTxnHighNoExceptions() { + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[0], new BitSet(), 5); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void maxTxnLowNoExceptions() { + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[0], new BitSet(), 15); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + } + + @Test + public void exceptionsAllBelow() { + BitSet bitSet = new BitSet(2); + bitSet.set(0, 2); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{3, 6}, bitSet, 3); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void exceptionsInMidst() { + BitSet bitSet = new BitSet(1); + bitSet.set(0, 1); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{8}, bitSet, 7); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + @Test + public void exceptionsAbveHighWaterMark() { + BitSet bitSet = new BitSet(4); + bitSet.set(0, 4); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{8, 11, 17, 29}, bitSet, 15); + Assert.assertArrayEquals("", new long[]{8, 11}, writeIds.getInvalidWriteIds()); + ValidWriteIdList.RangeResponse rsp = writeIds.isWriteIdRangeValid(7, 9); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + rsp = writeIds.isWriteIdRangeValid(12, 16); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + } + + @Test + public void writeToString() { + BitSet bitSet = new BitSet(4); + bitSet.set(0, 4); + ValidWriteIdList writeIds = new ValidCompactorWriteIdList(tableName, new long[]{7, 9, 10, Long.MAX_VALUE}, bitSet, 8); + Assert.assertEquals(tableName + ":8:" + Long.MAX_VALUE + "::7", writeIds.writeToString()); + writeIds = new ValidCompactorWriteIdList(); + Assert.assertEquals("null:" + 
Long.toString(Long.MAX_VALUE) + ":" + Long.MAX_VALUE + "::", writeIds.writeToString()); + writeIds = new ValidCompactorWriteIdList(tableName, new long[0], new BitSet(), 23); + Assert.assertEquals(tableName + ":23:" + Long.MAX_VALUE + "::", writeIds.writeToString()); + } + + @Test + public void readFromString() { + ValidCompactorWriteIdList writeIds = new ValidCompactorWriteIdList(tableName + ":37:" + Long.MAX_VALUE + "::7,9,10"); + Assert.assertEquals(tableName, writeIds.getTableName()); + Assert.assertEquals(37L, writeIds.getHighWatermark()); + Assert.assertNull(writeIds.getMinOpenWriteId()); + Assert.assertArrayEquals(new long[]{7L, 9L, 10L}, writeIds.getInvalidWriteIds()); + writeIds = new ValidCompactorWriteIdList(tableName + ":21:" + Long.MAX_VALUE + ":"); + Assert.assertEquals(21L, writeIds.getHighWatermark()); + Assert.assertNull(writeIds.getMinOpenWriteId()); + Assert.assertEquals(0, writeIds.getInvalidWriteIds().length); + } + + @Test + public void testAbortedTxn() throws Exception { + ValidCompactorWriteIdList writeIdList = new ValidCompactorWriteIdList(tableName + ":5:4::1,2,3"); + Assert.assertEquals(5L, writeIdList.getHighWatermark()); + Assert.assertEquals(4, writeIdList.getMinOpenWriteId().longValue()); + Assert.assertArrayEquals(new long[]{1L, 2L, 3L}, writeIdList.getInvalidWriteIds()); + } + + @Test + public void testAbortedRange() throws Exception { + ValidCompactorWriteIdList writeIdList = new ValidCompactorWriteIdList(tableName + ":11:4::5,6,7,8"); + ValidWriteIdList.RangeResponse rsp = writeIdList.isWriteIdRangeAborted(1L, 3L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + rsp = writeIdList.isWriteIdRangeAborted(9L, 10L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.NONE, rsp); + rsp = writeIdList.isWriteIdRangeAborted(6L, 7L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.ALL, rsp); + rsp = writeIdList.isWriteIdRangeAborted(4L, 6L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.SOME, rsp); + rsp = writeIdList.isWriteIdRangeAborted(6L, 13L); + Assert.assertEquals(ValidWriteIdList.RangeResponse.SOME, rsp); + } +} diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java similarity index 51% rename from storage-api/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java rename to storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java index f270eb8..68ffb44 100644 --- a/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestValidReaderWriteIdList.java @@ -29,58 +29,59 @@ import java.util.BitSet; /** - * Tests for {@link ValidReadTxnList} + * Tests for {@link ValidReaderWriteIdList} */ -public class TestValidReadTxnList { +public class TestValidReaderWriteIdList { + private final String tableName = "t1"; @Test public void noExceptions() throws Exception { - ValidTxnList txnList = new ValidReadTxnList(new long[0], new BitSet(), 1, Long.MAX_VALUE); - String str = txnList.writeToString(); - Assert.assertEquals("1:" + Long.MAX_VALUE + "::", str); - ValidTxnList newList = new ValidReadTxnList(); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList(tableName, new long[0], new BitSet(), 1, Long.MAX_VALUE); + String str = writeIdList.writeToString(); + Assert.assertEquals(tableName + ":1:" + Long.MAX_VALUE + "::", str); + ValidWriteIdList newList = new ValidReaderWriteIdList(); 
newList.readFromString(str); - Assert.assertTrue(newList.isTxnValid(1)); - Assert.assertFalse(newList.isTxnValid(2)); + Assert.assertTrue(newList.isWriteIdValid(1)); + Assert.assertFalse(newList.isWriteIdValid(2)); } @Test public void exceptions() throws Exception { - ValidTxnList txnList = new ValidReadTxnList(new long[]{2L,4L}, new BitSet(), 5, 4L); - String str = txnList.writeToString(); - Assert.assertEquals("5:4:2,4:", str); - ValidTxnList newList = new ValidReadTxnList(); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList(tableName, new long[]{2L,4L}, new BitSet(), 5, 4L); + String str = writeIdList.writeToString(); + Assert.assertEquals(tableName + ":5:4:2,4:", str); + ValidWriteIdList newList = new ValidReaderWriteIdList(); newList.readFromString(str); - Assert.assertTrue(newList.isTxnValid(1)); - Assert.assertFalse(newList.isTxnValid(2)); - Assert.assertTrue(newList.isTxnValid(3)); - Assert.assertFalse(newList.isTxnValid(4)); - Assert.assertTrue(newList.isTxnValid(5)); - Assert.assertFalse(newList.isTxnValid(6)); + Assert.assertTrue(newList.isWriteIdValid(1)); + Assert.assertFalse(newList.isWriteIdValid(2)); + Assert.assertTrue(newList.isWriteIdValid(3)); + Assert.assertFalse(newList.isWriteIdValid(4)); + Assert.assertTrue(newList.isWriteIdValid(5)); + Assert.assertFalse(newList.isWriteIdValid(6)); } @Test public void longEnoughToCompress() throws Exception { long[] exceptions = new long[1000]; for (int i = 0; i < 1000; i++) exceptions[i] = i + 100; - ValidTxnList txnList = new ValidReadTxnList(exceptions, new BitSet(), 2000, 900); - String str = txnList.writeToString(); - ValidTxnList newList = new ValidReadTxnList(); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList(tableName, exceptions, new BitSet(), 2000, 900); + String str = writeIdList.writeToString(); + ValidWriteIdList newList = new ValidReaderWriteIdList(); newList.readFromString(str); - for (int i = 0; i < 100; i++) Assert.assertTrue(newList.isTxnValid(i)); - for (int i = 100; i < 1100; i++) Assert.assertFalse(newList.isTxnValid(i)); - for (int i = 1100; i < 2001; i++) Assert.assertTrue(newList.isTxnValid(i)); - Assert.assertFalse(newList.isTxnValid(2001)); + for (int i = 0; i < 100; i++) Assert.assertTrue(newList.isWriteIdValid(i)); + for (int i = 100; i < 1100; i++) Assert.assertFalse(newList.isWriteIdValid(i)); + for (int i = 1100; i < 2001; i++) Assert.assertTrue(newList.isWriteIdValid(i)); + Assert.assertFalse(newList.isWriteIdValid(2001)); } @Test public void readWriteConfig() throws Exception { long[] exceptions = new long[1000]; for (int i = 0; i < 1000; i++) exceptions[i] = i + 100; - ValidTxnList txnList = new ValidReadTxnList(exceptions, new BitSet(), 2000, 900); - String str = txnList.writeToString(); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList(tableName, exceptions, new BitSet(), 2000, 900); + String str = writeIdList.writeToString(); Configuration conf = new Configuration(); - conf.set(ValidTxnList.VALID_TXNS_KEY, str); + conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, str); File tmpFile = File.createTempFile("TestValidTxnImpl", "readWriteConfig"); DataOutputStream out = new DataOutputStream(new FileOutputStream(tmpFile)); conf.write(out); @@ -88,7 +89,7 @@ public void readWriteConfig() throws Exception { DataInputStream in = new DataInputStream(new FileInputStream(tmpFile)); Configuration newConf = new Configuration(); newConf.readFields(in); - Assert.assertEquals(str, newConf.get(ValidTxnList.VALID_TXNS_KEY)); + Assert.assertEquals(str, 
newConf.get(ValidWriteIdList.VALID_WRITEIDS_KEY)); } @Test @@ -97,13 +98,13 @@ public void testAbortedTxn() throws Exception { BitSet bitSet = new BitSet(exceptions.length); bitSet.set(0); // mark txn "2L" aborted bitSet.set(3); // mark txn "8L" aborted - ValidTxnList txnList = new ValidReadTxnList(exceptions, bitSet, 11, 4L); - String str = txnList.writeToString(); - Assert.assertEquals("11:4:4,6,10:2,8", str); - Assert.assertTrue(txnList.isTxnAborted(2L)); - Assert.assertFalse(txnList.isTxnAborted(4L)); - Assert.assertFalse(txnList.isTxnAborted(6L)); - Assert.assertTrue(txnList.isTxnAborted(8L)); - Assert.assertFalse(txnList.isTxnAborted(10L)); + ValidWriteIdList writeIdList = new ValidReaderWriteIdList(tableName, exceptions, bitSet, 11, 4L); + String str = writeIdList.writeToString(); + Assert.assertEquals(tableName + ":11:4:4,6,10:2,8", str); + Assert.assertTrue(writeIdList.isWriteIdAborted(2L)); + Assert.assertFalse(writeIdList.isWriteIdAborted(4L)); + Assert.assertFalse(writeIdList.isWriteIdAborted(6L)); + Assert.assertTrue(writeIdList.isWriteIdAborted(8L)); + Assert.assertFalse(writeIdList.isWriteIdAborted(10L)); } }
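Editorial note, not part of the patch: a short usage sketch of the new classes, assuming they are on the classpath as introduced above. The table name db1.t1 and the sample write ids are made up; the expected outputs follow directly from the tests shown, the per-table string form produced by ValidReaderWriteIdList.writeToString() (tableName:highWatermark:minOpenWriteId:openWriteIds:abortedWriteIds), and the '$'-separated multi-table form that ValidTxnWriteIdList stores under hive.txn.tables.valid.writeids.
import java.util.BitSet;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdSnapshotSketch {
  public static void main(String[] args) {
    long[] exceptions = {2L, 4L, 6L, 8L, 10L};       // invalid write ids, sorted
    BitSet abortedBits = new BitSet(exceptions.length);
    abortedBits.set(0);                              // write id 2 aborted
    abortedBits.set(3);                              // write id 8 aborted

    // Reader-side snapshot for one table: high watermark 11, smallest open write id 4.
    ValidWriteIdList t1 = new ValidReaderWriteIdList("db1.t1", exceptions, abortedBits, 11, 4L);
    System.out.println(t1.writeToString());          // db1.t1:11:4:4,6,10:2,8
    System.out.println(t1.isWriteIdValid(3));        // true  (committed)
    System.out.println(t1.isWriteIdValid(4));        // false (open)
    System.out.println(t1.isWriteIdAborted(8));      // true  (aborted)

    // Per-transaction container keyed by table name, serialized with '$' between tables.
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList();
    txnWriteIds.addTableWriteId(t1);
    // Parse it back the way a downstream task would after reading the config value.
    ValidTxnWriteIdList parsed = new ValidTxnWriteIdList(txnWriteIds.toString());
    System.out.println(parsed.getTableWriteIdList("db1.t1").getHighWatermark()); // 11
  }
}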