diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 54306234bf..6c8ebd017d 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -31,6 +31,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -243,6 +244,12 @@ public boolean dropType(String typeName) {
     return objectStore.dropType(typeName);
   }
 
+  @Override
+  public boolean updateLastAccessTime(Map<String, Set<String>> partsMap, int lastAccessTime)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.updateLastAccessTime(partsMap, lastAccessTime);
+  }
+
   @Override
   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
index e7d8e55695..9d2879c564 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.hive.ql.hooks;
 
+import java.util.HashSet;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -31,6 +37,7 @@
  */
 public class UpdateInputAccessTimeHook {
 
+  private static final Log LOG = LogFactory.getLog(UpdateInputAccessTimeHook.class);
   private static final String LAST_ACCESS_TIME = "lastAccessTime";
 
   public static class PreExec implements ExecuteWithHookContext {
@@ -50,6 +57,8 @@ public void run(HookContext hookContext) throws Exception {
       }
 
       int lastAccessTime = (int) (System.currentTimeMillis()/1000);
+      Set<String> parts = null;
+      Map<String, Set<String>> partsMap = new HashMap<String, Set<String>>();
 
       for(ReadEntity re: inputs) {
         // Set the last query time
@@ -73,12 +82,12 @@ public void run(HookContext hookContext) throws Exception {
           String dbName = re.getTable().getDbName();
           String tblName = re.getTable().getTableName();
           Partition p = re.getPartition();
-          Table t = db.getTable(dbName, tblName);
-          p = db.getPartition(t, p.getSpec(), false);
-          p.setLastAccessTime(lastAccessTime);
-          db.alterPartition(null, dbName, tblName, p, null, false);
-          t.setLastAccessTime(lastAccessTime);
-          db.alterTable(dbName + "." + tblName, t, false, null, false);
+          parts = partsMap.get(dbName + "." + tblName);
+          if (parts == null) {
+            parts = new HashSet<String>();
+          }
+          parts.add(p.getName());
+          partsMap.put(dbName + "." + tblName, parts);
           break;
         }
         default:
@@ -86,6 +95,8 @@ public void run(HookContext hookContext) throws Exception {
           break;
         }
       }
+      if (partsMap.size() > 0)
+        db.updateLastAccessTime(partsMap, lastAccessTime);
     }
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 769522d53c..05f00a94a7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1050,6 +1050,25 @@ public void createTable(Table tbl, boolean ifNotExists) throws HiveException {
         null, null);
   }
 
+  /**
+   * Updates the lastAccessTime on a set of partitions.
+   *
+   * @param partitionMap a map of partition-name sets keyed by fully qualified table name.
+   * @param lastAccessTime the time value, as an integer, to set on the given tables and partitions.
+   * @throws InvalidOperationException if the change in metadata is not acceptable
+   * @throws HiveException if the metastore call fails
+   */
+  public void updateLastAccessTime(Map<String, Set<String>> partitionMap, int lastAccessTime)
+      throws InvalidOperationException, HiveException {
+    try {
+      getMSC().updateLastAccessTime(partitionMap, lastAccessTime);
+    } catch (MetaException e) {
+      throw new HiveException("Unable to update lastAccessTime for tables/partitions. " + e.getMessage(), e);
+    } catch (TException e) {
+      throw new HiveException("Unable to update lastAccessTime for tables/partitions. " + e.getMessage(), e);
+    }
+  }
+
   public static List<FieldSchema> getFieldsFromDeserializerForMsStorage(
       Table tbl, Deserializer deserializer) throws SerDeException, MetaException {
     List<FieldSchema> schema = HiveMetaStoreUtils.getFieldsFromDeserializer(
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index f2781ce50f..f52f7a1bf2 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -176,6 +176,8 @@
     public Partition get_partition(String db_name, String tbl_name, List<String> part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
 
+    public boolean update_last_access_time(Map<String,Set<String>> partitionsMap, int last_accesstime) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
     public Partition exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
 
     public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
 
@@ -628,6 +630,8 @@
     public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws org.apache.thrift.TException;
 
+    public void update_last_access_time(Map<String,Set<String>> partitionsMap, int last_accesstime, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws org.apache.thrift.TException;
+
     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name,
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -2943,6 +2947,36 @@ public Partition recv_get_partition() throws MetaException, NoSuchObjectExceptio throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition failed: unknown result"); } + public boolean update_last_access_time(Map> partitionsMap, int last_accesstime) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + send_update_last_access_time(partitionsMap, last_accesstime); + return recv_update_last_access_time(); + } + + public void send_update_last_access_time(Map> partitionsMap, int last_accesstime) throws org.apache.thrift.TException + { + update_last_access_time_args args = new update_last_access_time_args(); + args.setPartitionsMap(partitionsMap); + args.setLast_accesstime(last_accesstime); + sendBase("update_last_access_time", args); + } + + public boolean recv_update_last_access_time() throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + update_last_access_time_result result = new update_last_access_time_result(); + receiveBase(result, "update_last_access_time"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_last_access_time failed: unknown result"); + } + public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException { send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); @@ -9721,6 +9755,41 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } + public void update_last_access_time(Map> partitionsMap, int last_accesstime, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + update_last_access_time_call method_call = new update_last_access_time_call(partitionsMap, last_accesstime, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_last_access_time_call extends org.apache.thrift.async.TAsyncMethodCall { + private Map> partitionsMap; + private int last_accesstime; + public update_last_access_time_call(Map> partitionsMap, int last_accesstime, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.partitionsMap = partitionsMap; + this.last_accesstime = last_accesstime; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + 
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_last_access_time", org.apache.thrift.protocol.TMessageType.CALL, 0)); + update_last_access_time_args args = new update_last_access_time_args(); + args.setPartitionsMap(partitionsMap); + args.setLast_accesstime(last_accesstime); + args.write(prot); + prot.writeMessageEnd(); + } + + public boolean getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_update_last_access_time(); + } + } + public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); exchange_partition_call method_call = new exchange_partition_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, resultHandler, this, ___protocolFactory, ___transport); @@ -15113,6 +15182,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public update_last_access_time() { + super("update_last_access_time"); + } + + public update_last_access_time_args getEmptyArgsInstance() { + return new update_last_access_time_args(); + } + + protected boolean isOneway() { + return false; + } + + public update_last_access_time_result getResult(I iface, update_last_access_time_args args) throws org.apache.thrift.TException { + update_last_access_time_result result = new update_last_access_time_result(); + try { + result.success = iface.update_last_access_time(args.partitionsMap, args.last_accesstime); + result.setSuccessIsSet(true); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class exchange_partition extends org.apache.thrift.ProcessFunction { public exchange_partition() { super("exchange_partition"); @@ -21075,6 +21172,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { - public exchange_partition() { - super("exchange_partition"); - } - - public exchange_partition_args getEmptyArgsInstance() { - return new exchange_partition_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - exchange_partition_result result = new exchange_partition_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - exchange_partition_result result = new exchange_partition_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) 
e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else if (e instanceof InvalidObjectException) { - result.o3 = (InvalidObjectException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, exchange_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class exchange_partitions extends org.apache.thrift.AsyncProcessFunction> { - public exchange_partitions() { - super("exchange_partitions"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_last_access_time extends org.apache.thrift.AsyncProcessFunction { + public update_last_access_time() { + super("update_last_access_time"); } - public exchange_partitions_args getEmptyArgsInstance() { - return new exchange_partitions_args(); + public update_last_access_time_args getEmptyArgsInstance() { + return new update_last_access_time_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - exchange_partitions_result result = new exchange_partitions_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - exchange_partitions_result result = new exchange_partitions_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else if (e instanceof InvalidObjectException) { - result.o3 = (InvalidObjectException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - 
return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, exchange_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_with_auth extends org.apache.thrift.AsyncProcessFunction { - public get_partition_with_auth() { - super("get_partition_with_auth"); - } - - public get_partition_with_auth_args getEmptyArgsInstance() { - return new get_partition_with_auth_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - get_partition_with_auth_result result = new get_partition_with_auth_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partition_with_auth_result result = new get_partition_with_auth_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_by_name extends org.apache.thrift.AsyncProcessFunction { - public get_partition_by_name() { - super("get_partition_by_name"); - } - - public get_partition_by_name_args getEmptyArgsInstance() { - return new get_partition_by_name_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Partition o) { - get_partition_by_name_result result = new get_partition_by_name_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal 
frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - get_partition_by_name_result result = new get_partition_by_name_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions() { - super("get_partitions"); - } - - public get_partitions_args getEmptyArgsInstance() { - return new get_partitions_args(); - } - - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_result result = new get_partitions_result(); + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + update_last_access_time_result result = new update_last_access_time_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -25731,7 +25562,7 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_result result = new get_partitions_result(); + update_last_access_time_result result = new update_last_access_time_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -25762,25 +25593,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler); + public void start(I iface, update_last_access_time_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_last_access_time(args.partitionsMap, args.last_accesstime,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_with_auth extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_with_auth() { - super("get_partitions_with_auth"); + 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class exchange_partition extends org.apache.thrift.AsyncProcessFunction { + public exchange_partition() { + super("exchange_partition"); } - public get_partitions_with_auth_args getEmptyArgsInstance() { - return new get_partitions_with_auth_args(); + public exchange_partition_args getEmptyArgsInstance() { + return new exchange_partition_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_with_auth_result result = new get_partitions_with_auth_result(); + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + exchange_partition_result result = new exchange_partition_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -25793,16 +25624,26 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_with_auth_result result = new get_partitions_with_auth_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + exchange_partition_result result = new exchange_partition_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; } else { @@ -25824,25 +25665,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler); + public void start(I iface, exchange_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_pspec extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_pspec() { - super("get_partitions_pspec"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class exchange_partitions extends org.apache.thrift.AsyncProcessFunction> { + public exchange_partitions() { + super("exchange_partitions"); } - public get_partitions_pspec_args getEmptyArgsInstance() { - return new get_partitions_pspec_args(); + public exchange_partitions_args getEmptyArgsInstance() { + return new exchange_partitions_args(); } - public 
AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_pspec_result result = new get_partitions_pspec_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + exchange_partitions_result result = new exchange_partitions_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -25855,16 +25696,26 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_pspec_result result = new get_partitions_pspec_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + exchange_partitions_result result = new exchange_partitions_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; } else { @@ -25886,25 +25737,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler); + public void start(I iface, exchange_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names extends org.apache.thrift.AsyncProcessFunction> { - public get_partition_names() { - super("get_partition_names"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_with_auth extends org.apache.thrift.AsyncProcessFunction { + public get_partition_with_auth() { + super("get_partition_with_auth"); } - public get_partition_names_args getEmptyArgsInstance() { - return new get_partition_names_args(); + public get_partition_with_auth_args getEmptyArgsInstance() { + return new get_partition_with_auth_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partition_names_result result = new get_partition_names_result(); + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + get_partition_with_auth_result result = new 
get_partition_with_auth_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -25917,14 +25768,14 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partition_names_result result = new get_partition_names_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + get_partition_with_auth_result result = new get_partition_with_auth_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; } @@ -25948,25 +25799,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler); + public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_values extends org.apache.thrift.AsyncProcessFunction { - public get_partition_values() { - super("get_partition_values"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_by_name extends org.apache.thrift.AsyncProcessFunction { + public get_partition_by_name() { + super("get_partition_by_name"); } - public get_partition_values_args getEmptyArgsInstance() { - return new get_partition_values_args(); + public get_partition_by_name_args getEmptyArgsInstance() { + return new get_partition_by_name_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(PartitionValuesResponse o) { - get_partition_values_result result = new get_partition_values_result(); + return new AsyncMethodCallback() { + public void onComplete(Partition o) { + get_partition_by_name_result result = new get_partition_by_name_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -25979,7 +25830,7 @@ public void onComplete(PartitionValuesResponse o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partition_values_result result = new get_partition_values_result(); + get_partition_by_name_result result = new get_partition_by_name_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -26010,25 +25861,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partition_values_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - 
iface.get_partition_values(args.request,resultHandler); + public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_ps extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_ps() { - super("get_partitions_ps"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions() { + super("get_partitions"); } - public get_partitions_ps_args getEmptyArgsInstance() { - return new get_partitions_ps_args(); + public get_partitions_args getEmptyArgsInstance() { + return new get_partitions_args(); } public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback>() { public void onComplete(List o) { - get_partitions_ps_result result = new get_partitions_ps_result(); + get_partitions_result result = new get_partitions_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26041,14 +25892,14 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_ps_result result = new get_partitions_ps_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; + get_partitions_result result = new get_partitions_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -26072,25 +25923,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_ps_with_auth extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_ps_with_auth() { - super("get_partitions_ps_with_auth"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_with_auth extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_with_auth() { + super("get_partitions_with_auth"); } - public get_partitions_ps_with_auth_args getEmptyArgsInstance() { - return new get_partitions_ps_with_auth_args(); + public get_partitions_with_auth_args getEmptyArgsInstance() { + return new get_partitions_with_auth_args(); } public 
AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback>() { public void onComplete(List o) { - get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); + get_partitions_with_auth_result result = new get_partitions_with_auth_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26103,7 +25954,7 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); + get_partitions_with_auth_result result = new get_partitions_with_auth_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -26134,25 +25985,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler); + public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_ps extends org.apache.thrift.AsyncProcessFunction> { - public get_partition_names_ps() { - super("get_partition_names_ps"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_pspec extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_pspec() { + super("get_partitions_pspec"); } - public get_partition_names_ps_args getEmptyArgsInstance() { - return new get_partition_names_ps_args(); + public get_partitions_pspec_args getEmptyArgsInstance() { + return new get_partitions_pspec_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partition_names_ps_result result = new get_partition_names_ps_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_pspec_result result = new get_partitions_pspec_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26165,14 +26016,14 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partition_names_ps_result result = new get_partition_names_ps_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; + get_partitions_pspec_result result = new get_partitions_pspec_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } 
- else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -26196,25 +26047,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_filter() { - super("get_partitions_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names extends org.apache.thrift.AsyncProcessFunction> { + public get_partition_names() { + super("get_partition_names"); } - public get_partitions_by_filter_args getEmptyArgsInstance() { - return new get_partitions_by_filter_args(); + public get_partition_names_args getEmptyArgsInstance() { + return new get_partition_names_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partition_names_result result = new get_partition_names_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26227,14 +26078,14 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_filter_result result = new get_partitions_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; + get_partition_names_result result = new get_partition_names_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -26258,25 +26109,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { - public get_part_specs_by_filter() { - super("get_part_specs_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_values extends org.apache.thrift.AsyncProcessFunction { + public get_partition_values() { + super("get_partition_values"); } - public get_part_specs_by_filter_args getEmptyArgsInstance() { - return new get_part_specs_by_filter_args(); + public get_partition_values_args getEmptyArgsInstance() { + return new get_partition_values_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + return new AsyncMethodCallback() { + public void onComplete(PartitionValuesResponse o) { + get_partition_values_result result = new get_partition_values_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26289,7 +26140,7 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + get_partition_values_result result = new get_partition_values_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -26320,25 +26171,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + public void start(I iface, get_partition_values_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partition_values(args.request,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { - public get_partitions_by_expr() { - super("get_partitions_by_expr"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_ps extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_ps() { + super("get_partitions_ps"); } - public get_partitions_by_expr_args getEmptyArgsInstance() { - return new get_partitions_by_expr_args(); + public get_partitions_ps_args getEmptyArgsInstance() { + return new get_partitions_ps_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(PartitionsByExprResult o) { - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + return new 
AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_ps_result result = new get_partitions_ps_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26351,7 +26202,7 @@ public void onComplete(PartitionsByExprResult o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + get_partitions_ps_result result = new get_partitions_ps_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -26382,27 +26233,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partitions_by_expr(args.req,resultHandler); + public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_num_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction { - public get_num_partitions_by_filter() { - super("get_num_partitions_by_filter"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_ps_with_auth extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_ps_with_auth() { + super("get_partitions_ps_with_auth"); } - public get_num_partitions_by_filter_args getEmptyArgsInstance() { - return new get_num_partitions_by_filter_args(); + public get_partitions_ps_with_auth_args getEmptyArgsInstance() { + return new get_partitions_ps_with_auth_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Integer o) { - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); result.success = o; - result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -26414,14 +26264,14 @@ public void onComplete(Integer o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); - if (e instanceof MetaException) { - result.o1 = (MetaException) e; + get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof NoSuchObjectException) { - result.o2 = (NoSuchObjectException) e; + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -26445,25 +26295,25 @@ protected 
boolean isOneway() { return false; } - public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { - public get_partitions_by_names() { - super("get_partitions_by_names"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_names_ps extends org.apache.thrift.AsyncProcessFunction> { + public get_partition_names_ps() { + super("get_partition_names_ps"); } - public get_partitions_by_names_args getEmptyArgsInstance() { - return new get_partitions_by_names_args(); + public get_partition_names_ps_args getEmptyArgsInstance() { + return new get_partition_names_ps_args(); } - public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback>() { - public void onComplete(List o) { - get_partitions_by_names_result result = new get_partitions_by_names_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partition_names_ps_result result = new get_partition_names_ps_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26476,7 +26326,7 @@ public void onComplete(List o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_names_result result = new get_partitions_by_names_result(); + get_partition_names_ps_result result = new get_partition_names_ps_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -26507,25 +26357,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler); + public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names_req extends org.apache.thrift.AsyncProcessFunction { - public get_partitions_by_names_req() { - super("get_partitions_by_names_req"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public 
get_partitions_by_filter() { + super("get_partitions_by_filter"); } - public get_partitions_by_names_req_args getEmptyArgsInstance() { - return new get_partitions_by_names_req_args(); + public get_partitions_by_filter_args getEmptyArgsInstance() { + return new get_partitions_by_filter_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(GetPartitionsByNamesResult o) { - get_partitions_by_names_req_result result = new get_partitions_by_names_req_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26538,7 +26388,7 @@ public void onComplete(GetPartitionsByNamesResult o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_partitions_by_names_req_result result = new get_partitions_by_names_req_result(); + get_partitions_by_filter_result result = new get_partitions_by_filter_result(); if (e instanceof MetaException) { result.o1 = (MetaException) e; result.setO1IsSet(true); @@ -26569,25 +26419,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_partitions_by_names_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partitions_by_names_req(args.req,resultHandler); + public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition extends org.apache.thrift.AsyncProcessFunction { - public alter_partition() { - super("alter_partition"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_part_specs_by_filter extends org.apache.thrift.AsyncProcessFunction> { + public get_part_specs_by_filter() { + super("get_part_specs_by_filter"); } - public alter_partition_args getEmptyArgsInstance() { - return new alter_partition_args(); + public get_part_specs_by_filter_args getEmptyArgsInstance() { + return new get_part_specs_by_filter_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_partition_result result = new alter_partition_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -26599,14 +26450,14 @@ public void onComplete(Void o) { public void onError(Exception e) { byte msgType = 
org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - alter_partition_result result = new alter_partition_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; + get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; } @@ -26630,25 +26481,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler); + public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions extends org.apache.thrift.AsyncProcessFunction { - public alter_partitions() { - super("alter_partitions"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr extends org.apache.thrift.AsyncProcessFunction { + public get_partitions_by_expr() { + super("get_partitions_by_expr"); } - public alter_partitions_args getEmptyArgsInstance() { - return new alter_partitions_args(); + public get_partitions_by_expr_args getEmptyArgsInstance() { + return new get_partitions_by_expr_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_partitions_result result = new alter_partitions_result(); + return new AsyncMethodCallback() { + public void onComplete(PartitionsByExprResult o) { + get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -26660,14 +26512,14 @@ public void onComplete(Void o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - alter_partitions_result result = new alter_partitions_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; + get_partitions_by_expr_result result = new get_partitions_by_expr_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; } @@ -26691,25 +26543,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - 
iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler);
+    public void start(I iface, get_partitions_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback<PartitionsByExprResult> resultHandler) throws TException {
+      iface.get_partitions_by_expr(args.req,resultHandler);
     }
   }
 
-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_args, Void> {
-    public alter_partitions_with_environment_context() {
-      super("alter_partitions_with_environment_context");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_num_partitions_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_num_partitions_by_filter_args, Integer> {
+    public get_num_partitions_by_filter() {
+      super("get_num_partitions_by_filter");
    }
 
-    public alter_partitions_with_environment_context_args getEmptyArgsInstance() {
-      return new alter_partitions_with_environment_context_args();
+    public get_num_partitions_by_filter_args getEmptyArgsInstance() {
+      return new get_num_partitions_by_filter_args();
     }
 
-    public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+    public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
       final org.apache.thrift.AsyncProcessFunction fcall = this;
-      return new AsyncMethodCallback<Void>() {
-        public void onComplete(Void o) {
-          alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
+      return new AsyncMethodCallback<Integer>() {
+        public void onComplete(Integer o) {
+          get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result();
+          result.success = o;
+          result.setSuccessIsSet(true);
           try {
             fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
             return;
@@ -26721,14 +26575,14 @@ public void onComplete(Void o) {
         public void onError(Exception e) {
           byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
           org.apache.thrift.TBase msg;
-          alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
-          if (e instanceof InvalidOperationException) {
-            result.o1 = (InvalidOperationException) e;
+          get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result();
+          if (e instanceof MetaException) {
+            result.o1 = (MetaException) e;
             result.setO1IsSet(true);
             msg = result;
           }
-          else if (e instanceof MetaException) {
-            result.o2 = (MetaException) e;
+          else if (e instanceof NoSuchObjectException) {
+            result.o2 = (NoSuchObjectException) e;
             result.setO2IsSet(true);
             msg = result;
           }
@@ -26752,25 +26606,25 @@ protected boolean isOneway() {
       return false;
     }
 
-    public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-      iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler);
+    public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+      iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler);
     }
   }
 
-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_req extends 
org.apache.thrift.AsyncProcessFunction { - public alter_partitions_req() { - super("alter_partitions_req"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names extends org.apache.thrift.AsyncProcessFunction> { + public get_partitions_by_names() { + super("get_partitions_by_names"); } - public alter_partitions_req_args getEmptyArgsInstance() { - return new alter_partitions_req_args(); + public get_partitions_by_names_args getEmptyArgsInstance() { + return new get_partitions_by_names_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(AlterPartitionsResponse o) { - alter_partitions_req_result result = new alter_partitions_req_result(); + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_partitions_by_names_result result = new get_partitions_by_names_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -26783,14 +26637,14 @@ public void onComplete(AlterPartitionsResponse o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - alter_partitions_req_result result = new alter_partitions_req_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; + get_partitions_by_names_result result = new get_partitions_by_names_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; } @@ -26814,25 +26668,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, alter_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_partitions_req(args.req,resultHandler); + public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { - public alter_partition_with_environment_context() { - super("alter_partition_with_environment_context"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_names_req extends org.apache.thrift.AsyncProcessFunction { + public get_partitions_by_names_req() { + super("get_partitions_by_names_req"); } - public alter_partition_with_environment_context_args getEmptyArgsInstance() { - return new alter_partition_with_environment_context_args(); + public get_partitions_by_names_req_args getEmptyArgsInstance() { + return new get_partitions_by_names_req_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public 
AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result(); + return new AsyncMethodCallback() { + public void onComplete(GetPartitionsByNamesResult o) { + get_partitions_by_names_req_result result = new get_partitions_by_names_req_result(); + result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -26844,14 +26699,320 @@ public void onComplete(Void o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result(); - if (e instanceof InvalidOperationException) { - result.o1 = (InvalidOperationException) e; + get_partitions_by_names_req_result result = new get_partitions_by_names_req_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_by_names_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partitions_by_names_req(args.req,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition extends org.apache.thrift.AsyncProcessFunction { + public alter_partition() { + super("alter_partition"); + } + + public alter_partition_args getEmptyArgsInstance() { + return new alter_partition_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_partition_result result = new alter_partition_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partition_result result = new alter_partition_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = 
(org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, alter_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partition(args.db_name, args.tbl_name, args.new_part,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions extends org.apache.thrift.AsyncProcessFunction { + public alter_partitions() { + super("alter_partitions"); + } + + public alter_partitions_args getEmptyArgsInstance() { + return new alter_partitions_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_partitions_result result = new alter_partitions_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partitions_result result = new alter_partitions_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, alter_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partitions(args.db_name, args.tbl_name, args.new_parts,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public alter_partitions_with_environment_context() { + super("alter_partitions_with_environment_context"); + } + + public alter_partitions_with_environment_context_args getEmptyArgsInstance() { + return new alter_partitions_with_environment_context_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + try { + fcall.sendResponse(fb,result, 
org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_req extends org.apache.thrift.AsyncProcessFunction { + public alter_partitions_req() { + super("alter_partitions_req"); + } + + public alter_partitions_req_args getEmptyArgsInstance() { + return new alter_partitions_req_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(AlterPartitionsResponse o) { + alter_partitions_req_result result = new alter_partitions_req_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partitions_req_result result = new alter_partitions_req_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, alter_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partitions_req(args.req,resultHandler); + } + } + 
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { + public alter_partition_with_environment_context() { + super("alter_partition_with_environment_context"); + } + + public alter_partition_with_environment_context_args getEmptyArgsInstance() { + return new alter_partition_with_environment_context_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; result.setO2IsSet(true); msg = result; } @@ -103312,22 +103473,1212 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropPartitionsResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropPartitionsResult.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_partitions_req_result.class, metaDataMap); + } + + public drop_partitions_req_result() { + } + + public drop_partitions_req_result( + DropPartitionsResult success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public drop_partitions_req_result(drop_partitions_req_result other) { + if (other.isSetSuccess()) { + this.success = new DropPartitionsResult(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public drop_partitions_req_result deepCopy() { + return new drop_partitions_req_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public DropPartitionsResult getSuccess() { + return this.success; + } + + public void setSuccess(DropPartitionsResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((DropPartitionsResult)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_partitions_req_result) + return this.equals((drop_partitions_req_result)that); + return false; + } + + public boolean equals(drop_partitions_req_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + 
boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_partitions_req_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_partitions_req_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_partitions_req_resultStandardSchemeFactory implements SchemeFactory { + public drop_partitions_req_resultStandardScheme getScheme() { + return new drop_partitions_req_resultStandardScheme(); + } + } + + private static class drop_partitions_req_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new DropPartitionsResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_partitions_req_resultTupleSchemeFactory implements SchemeFactory { + public drop_partitions_req_resultTupleScheme getScheme() { + return new drop_partitions_req_resultTupleScheme(); + } + } + + private static class drop_partitions_req_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } 
+ if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = new DropPartitionsResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_args"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partition_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partition_argsTupleSchemeFactory()); + } + + private String db_name; // required + private String tbl_name; // required + private List part_vals; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + PART_VALS((short)3, "part_vals"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // PART_VALS + return PART_VALS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_args.class, metaDataMap); + } + + public get_partition_args() { + } + + public get_partition_args( + String db_name, + String tbl_name, + List part_vals) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.part_vals = part_vals; + } + + /** + * Performs a deep copy on other. + */ + public get_partition_args(get_partition_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(other.part_vals); + this.part_vals = __this__part_vals; + } + } + + public get_partition_args deepCopy() { + return new get_partition_args(this); + } + + @Override + public void clear() { + this.db_name = null; + this.tbl_name = null; + this.part_vals = null; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? 
null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public void setPart_valsIsSet(boolean value) { + if (!value) { + this.part_vals = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case PART_VALS: + return getPart_vals(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_partition_args) + return this.equals((get_partition_args)that); + return false; + } + + public boolean equals(get_partition_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_part_vals = true && (isSetPart_vals()); + list.add(present_part_vals); + if (present_part_vals) + list.add(part_vals); + + return list.hashCode(); + } + + @Override + 
public int compareTo(get_partition_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(other.isSetPart_vals()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPart_vals()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, other.part_vals); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partition_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_partition_argsStandardSchemeFactory implements SchemeFactory { + public get_partition_argsStandardScheme getScheme() { + return new get_partition_argsStandardScheme(); + } + } + + private static class get_partition_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + 
schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PART_VALS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1274.size); + String _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) + { + _elem1275 = iprot.readString(); + struct.part_vals.add(_elem1275); + } + iprot.readListEnd(); + } + struct.setPart_valsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); + for (String _iter1277 : struct.part_vals) + { + oprot.writeString(_iter1277); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_partition_argsTupleSchemeFactory implements SchemeFactory { + public get_partition_argsTupleScheme getScheme() { + return new get_partition_argsTupleScheme(); + } + } + + private static class get_partition_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + if (struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetPart_vals()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetPart_vals()) { + { + oprot.writeI32(struct.part_vals.size()); + for (String _iter1278 : struct.part_vals) + { + oprot.writeString(_iter1278); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + 
struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1279.size); + String _elem1280; + for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) + { + _elem1280 = iprot.readString(); + struct.part_vals.add(_elem1280); + } + } + struct.setPart_valsIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_partition_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partition_resultTupleSchemeFactory()); + } + + private Partition success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_partitions_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_result.class, metaDataMap); } - public drop_partitions_req_result() { + public get_partition_result() { } - public drop_partitions_req_result( - DropPartitionsResult success, - NoSuchObjectException o1, - MetaException o2) + public get_partition_result( + Partition success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; @@ -103338,20 +104689,20 @@ public drop_partitions_req_result( /** * Performs a deep copy on other. 
*/ - public drop_partitions_req_result(drop_partitions_req_result other) { + public get_partition_result(get_partition_result other) { if (other.isSetSuccess()) { - this.success = new DropPartitionsResult(other.success); + this.success = new Partition(other.success); } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } - public drop_partitions_req_result deepCopy() { - return new drop_partitions_req_result(this); + public get_partition_result deepCopy() { + return new get_partition_result(this); } @Override @@ -103361,11 +104712,11 @@ public void clear() { this.o2 = null; } - public DropPartitionsResult getSuccess() { + public Partition getSuccess() { return this.success; } - public void setSuccess(DropPartitionsResult success) { + public void setSuccess(Partition success) { this.success = success; } @@ -103384,11 +104735,11 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { + public MetaException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -103407,11 +104758,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -103436,7 +104787,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((DropPartitionsResult)value); + setSuccess((Partition)value); } break; @@ -103444,7 +104795,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((MetaException)value); } break; @@ -103452,7 +104803,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -103495,12 +104846,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_partitions_req_result) - return this.equals((drop_partitions_req_result)that); + if (that instanceof get_partition_result) + return this.equals((get_partition_result)that); return false; } - public boolean equals(drop_partitions_req_result that) { + public boolean equals(get_partition_result that) { if (that == null) return false; @@ -103557,7 +104908,7 @@ public int hashCode() { } @Override - public int compareTo(drop_partitions_req_result other) { + public int compareTo(get_partition_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -103611,7 +104962,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
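// Annotation (not generated code): the "-drop_partitions_req_result" / "+get_partition_result" and, further
// down, the "-get_partition_args" / "+update_last_access_time_args" pairings in these hunks appear to be an
// artifact of how the diff aligns the regenerated file: inserting the new update_last_access_time structs
// shifts every later generated class, so the diff pairs unrelated classes line by line. The get_partition_*
// and drop_partitions_req_* structs are not semantically changed here; the substantive addition is the
// update_last_access_time_args / update_last_access_time_result pair introduced further below.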
@Override public String toString() { - StringBuilder sb = new StringBuilder("drop_partitions_req_result("); + StringBuilder sb = new StringBuilder("get_partition_result("); boolean first = true; sb.append("success:"); @@ -103665,15 +105016,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_partitions_req_resultStandardSchemeFactory implements SchemeFactory { - public drop_partitions_req_resultStandardScheme getScheme() { - return new drop_partitions_req_resultStandardScheme(); + private static class get_partition_resultStandardSchemeFactory implements SchemeFactory { + public get_partition_resultStandardScheme getScheme() { + return new get_partition_resultStandardScheme(); } } - private static class drop_partitions_req_resultStandardScheme extends StandardScheme { + private static class get_partition_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -103685,7 +105036,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new DropPartitionsResult(); + struct.success = new Partition(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -103694,7 +105045,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -103703,7 +105054,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -103719,7 +105070,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partitions_req struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -103744,16 +105095,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partitions_re } - private static class drop_partitions_req_resultTupleSchemeFactory implements SchemeFactory { - public drop_partitions_req_resultTupleScheme getScheme() { - return new drop_partitions_req_resultTupleScheme(); + private static class get_partition_resultTupleSchemeFactory implements SchemeFactory { + public get_partition_resultTupleScheme getScheme() { + return new get_partition_resultTupleScheme(); } } - private static class drop_partitions_req_resultTupleScheme extends TupleScheme { + private static class get_partition_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_result struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -103778,21 +105129,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new DropPartitionsResult(); + struct.success = new Partition(); struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -103801,28 +105152,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_last_access_time_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_last_access_time_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField PARTITIONS_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionsMap", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField LAST_ACCESSTIME_FIELD_DESC = new org.apache.thrift.protocol.TField("last_accesstime", org.apache.thrift.protocol.TType.I32, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partition_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partition_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_last_access_time_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_last_access_time_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private List part_vals; // 
required + private Map> partitionsMap; // required + private int last_accesstime; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - PART_VALS((short)3, "part_vals"); + PARTITIONS_MAP((short)1, "partitionsMap"), + LAST_ACCESSTIME((short)2, "last_accesstime"); private static final Map byName = new HashMap(); @@ -103837,12 +105185,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // PART_VALS - return PART_VALS; + case 1: // PARTITIONS_MAP + return PARTITIONS_MAP; + case 2: // LAST_ACCESSTIME + return LAST_ACCESSTIME; default: return null; } @@ -103883,168 +105229,140 @@ public String getFieldName() { } // isset id assignments + private static final int __LAST_ACCESSTIME_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.PARTITIONS_MAP, new org.apache.thrift.meta_data.FieldMetaData("partitionsMap", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))))); + tmpMap.put(_Fields.LAST_ACCESSTIME, new org.apache.thrift.meta_data.FieldMetaData("last_accesstime", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_last_access_time_args.class, metaDataMap); } - public get_partition_args() { + public update_last_access_time_args() { } - public get_partition_args( - String db_name, - String tbl_name, - List part_vals) + public update_last_access_time_args( + Map> partitionsMap, + int last_accesstime) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.part_vals = part_vals; + 
this.partitionsMap = partitionsMap; + this.last_accesstime = last_accesstime; + setLast_accesstimeIsSet(true); } /** * Performs a deep copy on other. */ - public get_partition_args(get_partition_args other) { - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; - } - if (other.isSetPart_vals()) { - List __this__part_vals = new ArrayList(other.part_vals); - this.part_vals = __this__part_vals; - } - } + public update_last_access_time_args(update_last_access_time_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetPartitionsMap()) { + Map> __this__partitionsMap = new HashMap>(other.partitionsMap.size()); + for (Map.Entry> other_element : other.partitionsMap.entrySet()) { - public get_partition_args deepCopy() { - return new get_partition_args(this); - } + String other_element_key = other_element.getKey(); + Set other_element_value = other_element.getValue(); - @Override - public void clear() { - this.db_name = null; - this.tbl_name = null; - this.part_vals = null; - } + String __this__partitionsMap_copy_key = other_element_key; - public String getDb_name() { - return this.db_name; + Set __this__partitionsMap_copy_value = new HashSet(other_element_value); + + __this__partitionsMap.put(__this__partitionsMap_copy_key, __this__partitionsMap_copy_value); + } + this.partitionsMap = __this__partitionsMap; + } + this.last_accesstime = other.last_accesstime; } - public void setDb_name(String db_name) { - this.db_name = db_name; + public update_last_access_time_args deepCopy() { + return new update_last_access_time_args(this); } - public void unsetDb_name() { - this.db_name = null; + @Override + public void clear() { + this.partitionsMap = null; + setLast_accesstimeIsSet(false); + this.last_accesstime = 0; } - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; + public int getPartitionsMapSize() { + return (this.partitionsMap == null) ? 0 : this.partitionsMap.size(); } - public void setDb_nameIsSet(boolean value) { - if (!value) { - this.db_name = null; + public void putToPartitionsMap(String key, Set val) { + if (this.partitionsMap == null) { + this.partitionsMap = new HashMap>(); } + this.partitionsMap.put(key, val); } - public String getTbl_name() { - return this.tbl_name; + public Map> getPartitionsMap() { + return this.partitionsMap; } - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; + public void setPartitionsMap(Map> partitionsMap) { + this.partitionsMap = partitionsMap; } - public void unsetTbl_name() { - this.tbl_name = null; + public void unsetPartitionsMap() { + this.partitionsMap = null; } - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; + /** Returns true if field partitionsMap is set (has been assigned a value) and false otherwise */ + public boolean isSetPartitionsMap() { + return this.partitionsMap != null; } - public void setTbl_nameIsSet(boolean value) { + public void setPartitionsMapIsSet(boolean value) { if (!value) { - this.tbl_name = null; + this.partitionsMap = null; } } - public int getPart_valsSize() { - return (this.part_vals == null) ? 0 : this.part_vals.size(); - } - - public java.util.Iterator getPart_valsIterator() { - return (this.part_vals == null) ? 
null : this.part_vals.iterator(); - } - - public void addToPart_vals(String elem) { - if (this.part_vals == null) { - this.part_vals = new ArrayList(); - } - this.part_vals.add(elem); + public int getLast_accesstime() { + return this.last_accesstime; } - public List getPart_vals() { - return this.part_vals; - } - - public void setPart_vals(List part_vals) { - this.part_vals = part_vals; + public void setLast_accesstime(int last_accesstime) { + this.last_accesstime = last_accesstime; + setLast_accesstimeIsSet(true); } - public void unsetPart_vals() { - this.part_vals = null; + public void unsetLast_accesstime() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LAST_ACCESSTIME_ISSET_ID); } - /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ - public boolean isSetPart_vals() { - return this.part_vals != null; + /** Returns true if field last_accesstime is set (has been assigned a value) and false otherwise */ + public boolean isSetLast_accesstime() { + return EncodingUtils.testBit(__isset_bitfield, __LAST_ACCESSTIME_ISSET_ID); } - public void setPart_valsIsSet(boolean value) { - if (!value) { - this.part_vals = null; - } + public void setLast_accesstimeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LAST_ACCESSTIME_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: + case PARTITIONS_MAP: if (value == null) { - unsetDb_name(); + unsetPartitionsMap(); } else { - setDb_name((String)value); + setPartitionsMap((Map>)value); } break; - case TBL_NAME: + case LAST_ACCESSTIME: if (value == null) { - unsetTbl_name(); + unsetLast_accesstime(); } else { - setTbl_name((String)value); - } - break; - - case PART_VALS: - if (value == null) { - unsetPart_vals(); - } else { - setPart_vals((List)value); + setLast_accesstime((Integer)value); } break; @@ -104053,14 +105371,11 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); - - case TBL_NAME: - return getTbl_name(); + case PARTITIONS_MAP: + return getPartitionsMap(); - case PART_VALS: - return getPart_vals(); + case LAST_ACCESSTIME: + return getLast_accesstime(); } throw new IllegalStateException(); @@ -104073,12 +105388,10 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case PART_VALS: - return isSetPart_vals(); + case PARTITIONS_MAP: + return isSetPartitionsMap(); + case LAST_ACCESSTIME: + return isSetLast_accesstime(); } throw new IllegalStateException(); } @@ -104087,39 +105400,30 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partition_args) - return this.equals((get_partition_args)that); + if (that instanceof update_last_access_time_args) + return this.equals((update_last_access_time_args)that); return false; } - public boolean equals(get_partition_args that) { + public boolean equals(update_last_access_time_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) + boolean this_present_partitionsMap = true && this.isSetPartitionsMap(); + boolean that_present_partitionsMap = true && 
that.isSetPartitionsMap(); + if (this_present_partitionsMap || that_present_partitionsMap) { + if (!(this_present_partitionsMap && that_present_partitionsMap)) return false; - if (!this.db_name.equals(that.db_name)) + if (!this.partitionsMap.equals(that.partitionsMap)) return false; } - boolean this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) + boolean this_present_last_accesstime = true; + boolean that_present_last_accesstime = true; + if (this_present_last_accesstime || that_present_last_accesstime) { + if (!(this_present_last_accesstime && that_present_last_accesstime)) return false; - if (!this.tbl_name.equals(that.tbl_name)) - return false; - } - - boolean this_present_part_vals = true && this.isSetPart_vals(); - boolean that_present_part_vals = true && that.isSetPart_vals(); - if (this_present_part_vals || that_present_part_vals) { - if (!(this_present_part_vals && that_present_part_vals)) - return false; - if (!this.part_vals.equals(that.part_vals)) + if (this.last_accesstime != that.last_accesstime) return false; } @@ -104130,58 +105434,43 @@ public boolean equals(get_partition_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); - - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); + boolean present_partitionsMap = true && (isSetPartitionsMap()); + list.add(present_partitionsMap); + if (present_partitionsMap) + list.add(partitionsMap); - boolean present_part_vals = true && (isSetPart_vals()); - list.add(present_part_vals); - if (present_part_vals) - list.add(part_vals); + boolean present_last_accesstime = true; + list.add(present_last_accesstime); + if (present_last_accesstime) + list.add(last_accesstime); return list.hashCode(); } @Override - public int compareTo(get_partition_args other) { + public int compareTo(update_last_access_time_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + lastComparison = Boolean.valueOf(isSetPartitionsMap()).compareTo(other.isSetPartitionsMap()); if (lastComparison != 0) { return lastComparison; } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (isSetPartitionsMap()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionsMap, other.partitionsMap); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + lastComparison = Boolean.valueOf(isSetLast_accesstime()).compareTo(other.isSetLast_accesstime()); if (lastComparison != 0) { return lastComparison; } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(other.isSetPart_vals()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetPart_vals()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, other.part_vals); + if 
(isSetLast_accesstime()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.last_accesstime, other.last_accesstime); if (lastComparison != 0) { return lastComparison; } @@ -104203,31 +105492,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partition_args("); + StringBuilder sb = new StringBuilder("update_last_access_time_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { + sb.append("partitionsMap:"); + if (this.partitionsMap == null) { sb.append("null"); } else { - sb.append(this.tbl_name); + sb.append(this.partitionsMap); } first = false; if (!first) sb.append(", "); - sb.append("part_vals:"); - if (this.part_vals == null) { - sb.append("null"); - } else { - sb.append(this.part_vals); - } + sb.append("last_accesstime:"); + sb.append(this.last_accesstime); first = false; sb.append(")"); return sb.toString(); @@ -104248,21 +105525,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partition_argsStandardSchemeFactory implements SchemeFactory { - public get_partition_argsStandardScheme getScheme() { - return new get_partition_argsStandardScheme(); + private static class update_last_access_time_argsStandardSchemeFactory implements SchemeFactory { + public update_last_access_time_argsStandardScheme getScheme() { + return new update_last_access_time_argsStandardScheme(); } } - private static class get_partition_argsStandardScheme extends StandardScheme { + private static class update_last_access_time_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_last_access_time_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -104272,36 +105551,40 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args break; } switch (schemeField.id) { - case 1: // DB_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // PART_VALS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + case 1: // PARTITIONS_MAP + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - 
org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1274.size); - String _elem1275; - for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) + org.apache.thrift.protocol.TMap _map1282 = iprot.readMapBegin(); + struct.partitionsMap = new HashMap>(2*_map1282.size); + String _key1283; + Set _val1284; + for (int _i1285 = 0; _i1285 < _map1282.size; ++_i1285) { - _elem1275 = iprot.readString(); - struct.part_vals.add(_elem1275); + _key1283 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set1286 = iprot.readSetBegin(); + _val1284 = new HashSet(2*_set1286.size); + String _elem1287; + for (int _i1288 = 0; _i1288 < _set1286.size; ++_i1288) + { + _elem1287 = iprot.readString(); + _val1284.add(_elem1287); + } + iprot.readSetEnd(); + } + struct.partitionsMap.put(_key1283, _val1284); } - iprot.readListEnd(); + iprot.readMapEnd(); } - struct.setPart_valsIsSet(true); + struct.setPartitionsMapIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // LAST_ACCESSTIME + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.last_accesstime = iprot.readI32(); + struct.setLast_accesstimeIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -104315,123 +105598,132 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_last_access_time_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); - oprot.writeFieldEnd(); - } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - oprot.writeString(struct.tbl_name); - oprot.writeFieldEnd(); - } - if (struct.part_vals != null) { - oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + if (struct.partitionsMap != null) { + oprot.writeFieldBegin(PARTITIONS_MAP_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1277 : struct.part_vals) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, struct.partitionsMap.size())); + for (Map.Entry> _iter1289 : struct.partitionsMap.entrySet()) { - oprot.writeString(_iter1277); + oprot.writeString(_iter1289.getKey()); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, _iter1289.getValue().size())); + for (String _iter1290 : _iter1289.getValue()) + { + oprot.writeString(_iter1290); + } + oprot.writeSetEnd(); + } } - oprot.writeListEnd(); + oprot.writeMapEnd(); } oprot.writeFieldEnd(); } + oprot.writeFieldBegin(LAST_ACCESSTIME_FIELD_DESC); + oprot.writeI32(struct.last_accesstime); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partition_argsTupleSchemeFactory implements SchemeFactory { - public get_partition_argsTupleScheme getScheme() { - return new get_partition_argsTupleScheme(); + private static class update_last_access_time_argsTupleSchemeFactory implements SchemeFactory { + public 
update_last_access_time_argsTupleScheme getScheme() { + return new update_last_access_time_argsTupleScheme(); } } - private static class get_partition_argsTupleScheme extends TupleScheme { + private static class update_last_access_time_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_last_access_time_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetPartitionsMap()) { optionals.set(0); } - if (struct.isSetTbl_name()) { + if (struct.isSetLast_accesstime()) { optionals.set(1); } - if (struct.isSetPart_vals()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); - } - if (struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); - } - if (struct.isSetPart_vals()) { + oprot.writeBitSet(optionals, 2); + if (struct.isSetPartitionsMap()) { { - oprot.writeI32(struct.part_vals.size()); - for (String _iter1278 : struct.part_vals) + oprot.writeI32(struct.partitionsMap.size()); + for (Map.Entry> _iter1291 : struct.partitionsMap.entrySet()) { - oprot.writeString(_iter1278); + oprot.writeString(_iter1291.getKey()); + { + oprot.writeI32(_iter1291.getValue().size()); + for (String _iter1292 : _iter1291.getValue()) + { + oprot.writeString(_iter1292); + } + } } } } + if (struct.isSetLast_accesstime()) { + oprot.writeI32(struct.last_accesstime); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_last_access_time_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } - if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1279.size); - String _elem1280; - for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) + org.apache.thrift.protocol.TMap _map1293 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.SET, iprot.readI32()); + struct.partitionsMap = new HashMap>(2*_map1293.size); + String _key1294; + Set _val1295; + for (int _i1296 = 0; _i1296 < _map1293.size; ++_i1296) { - _elem1280 = iprot.readString(); - struct.part_vals.add(_elem1280); + _key1294 = iprot.readString(); + { + org.apache.thrift.protocol.TSet _set1297 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _val1295 = new HashSet(2*_set1297.size); + String _elem1298; + for (int _i1299 = 0; _i1299 < _set1297.size; ++_i1299) + { + _elem1298 = iprot.readString(); + _val1295.add(_elem1298); + } + } + struct.partitionsMap.put(_key1294, _val1295); } } - struct.setPart_valsIsSet(true); + struct.setPartitionsMapIsSet(true); + } + if (incoming.get(1)) { + struct.last_accesstime = iprot.readI32(); + 
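// Annotation (not generated code): the TupleScheme write/read around this point serializes partitionsMap as
// a 2-bit presence bitset followed by bare sizes and values (writeI32 for the map size and each nested set
// size, then the strings), with no TMap/TSet field headers. TTupleProtocol can omit that metadata because
// both ends are assumed to share the same IDL; the StandardScheme earlier in this hunk, by contrast, writes
// full field and container descriptors so unknown fields can be skipped.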
struct.setLast_accesstimeIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partition_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partition_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_last_access_time_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_last_access_time_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partition_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partition_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new update_last_access_time_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new update_last_access_time_resultTupleSchemeFactory()); } - private Partition success; // required - private MetaException o1; // required - private NoSuchObjectException o2; // required + private boolean success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -104498,29 +105790,32 @@ public String getFieldName() { } // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_last_access_time_result.class, metaDataMap); } - public get_partition_result() { + public update_last_access_time_result() { } - public get_partition_result( - Partition success, - MetaException o1, - NoSuchObjectException o2) + public update_last_access_time_result( + boolean success, + NoSuchObjectException o1, + MetaException o2) { this(); this.success = success; + setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; } @@ -104528,57 +105823,56 @@ public get_partition_result( /** * Performs a deep copy on other. 
*/ - public get_partition_result(get_partition_result other) { - if (other.isSetSuccess()) { - this.success = new Partition(other.success); - } + public update_last_access_time_result(update_last_access_time_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new NoSuchObjectException(other.o2); + this.o2 = new MetaException(other.o2); } } - public get_partition_result deepCopy() { - return new get_partition_result(this); + public update_last_access_time_result deepCopy() { + return new update_last_access_time_result(this); } @Override public void clear() { - this.success = null; + setSuccessIsSet(false); + this.success = false; this.o1 = null; this.o2 = null; } - public Partition getSuccess() { + public boolean isSuccess() { return this.success; } - public void setSuccess(Partition success) { + public void setSuccess(boolean success) { this.success = success; + setSuccessIsSet(true); } public void unsetSuccess() { - this.success = null; + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return this.success != null; + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); } public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -104597,11 +105891,11 @@ public void setO1IsSet(boolean value) { } } - public NoSuchObjectException getO2() { + public MetaException getO2() { return this.o2; } - public void setO2(NoSuchObjectException o2) { + public void setO2(MetaException o2) { this.o2 = o2; } @@ -104626,7 +105920,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Partition)value); + setSuccess((Boolean)value); } break; @@ -104634,7 +105928,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); } break; @@ -104642,7 +105936,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((NoSuchObjectException)value); + setO2((MetaException)value); } break; @@ -104652,7 +105946,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return getSuccess(); + return isSuccess(); case O1: return getO1(); @@ -104685,21 +105979,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partition_result) - return this.equals((get_partition_result)that); + if (that instanceof update_last_access_time_result) + return this.equals((update_last_access_time_result)that); return false; } - public boolean equals(get_partition_result that) { + public boolean equals(update_last_access_time_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && 
that.isSetSuccess(); + boolean this_present_success = true; + boolean that_present_success = true; if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (!this.success.equals(that.success)) + if (this.success != that.success) return false; } @@ -104728,7 +106022,7 @@ public boolean equals(get_partition_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); + boolean present_success = true; list.add(present_success); if (present_success) list.add(success); @@ -104747,7 +106041,7 @@ public int hashCode() { } @Override - public int compareTo(get_partition_result other) { + public int compareTo(update_last_access_time_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -104801,15 +106095,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partition_result("); + StringBuilder sb = new StringBuilder("update_last_access_time_result("); boolean first = true; sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } + sb.append(this.success); first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -104834,9 +106124,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -104849,21 +106136,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_partition_resultStandardSchemeFactory implements SchemeFactory { - public get_partition_resultStandardScheme getScheme() { - return new get_partition_resultStandardScheme(); + private static class update_last_access_time_resultStandardSchemeFactory implements SchemeFactory { + public update_last_access_time_resultStandardScheme getScheme() { + return new update_last_access_time_resultStandardScheme(); } } - private static class get_partition_resultStandardScheme extends StandardScheme { + private static class update_last_access_time_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, update_last_access_time_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -104874,9 +106163,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_resul } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new Partition(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -104884,7 +106172,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_resul break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -104893,7 +106181,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_resul break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -104909,13 +106197,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_resul struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, update_last_access_time_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { + if (struct.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + oprot.writeBool(struct.success); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -104934,16 +106222,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_resu } - private static class get_partition_resultTupleSchemeFactory implements SchemeFactory { - public get_partition_resultTupleScheme getScheme() { - return new get_partition_resultTupleScheme(); + private static class update_last_access_time_resultTupleSchemeFactory implements SchemeFactory { + public update_last_access_time_resultTupleScheme getScheme() { + return new update_last_access_time_resultTupleScheme(); 
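// Annotation (not generated code): judging from the generated field metadata above (map<string,set<string>>
// partitionsMap at field 1, i32 last_accesstime at field 2, bool success at field 0, o1 = NoSuchObjectException,
// o2 = MetaException), the hive_metastore.thrift change behind these structs presumably looks like:
//
//   bool update_last_access_time(1: map<string, set<string>> partitionsMap, 2: i32 last_accesstime)
//       throws (1: NoSuchObjectException o1, 2: MetaException o2)
//
// The .thrift hunk itself is not part of this excerpt, so treat the IDL above as a reconstruction.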
} } - private static class get_partition_resultTupleScheme extends TupleScheme { + private static class update_last_access_time_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, update_last_access_time_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -104957,7 +106245,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_resul } oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { - struct.success.write(oprot); + oprot.writeBool(struct.success); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -104968,21 +106256,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_resul } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, update_last_access_time_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new Partition(); - struct.success.read(iprot); + struct.success = iprot.readBool(); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -105624,15 +106911,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1282 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1282.size); - String _key1283; - String _val1284; - for (int _i1285 = 0; _i1285 < _map1282.size; ++_i1285) + org.apache.thrift.protocol.TMap _map1300 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1300.size); + String _key1301; + String _val1302; + for (int _i1303 = 0; _i1303 < _map1300.size; ++_i1303) { - _key1283 = iprot.readString(); - _val1284 = iprot.readString(); - struct.partitionSpecs.put(_key1283, _val1284); + _key1301 = iprot.readString(); + _val1302 = iprot.readString(); + struct.partitionSpecs.put(_key1301, _val1302); } iprot.readMapEnd(); } @@ -105690,10 +106977,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1286 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1304 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1286.getKey()); - oprot.writeString(_iter1286.getValue()); + oprot.writeString(_iter1304.getKey()); + oprot.writeString(_iter1304.getValue()); } oprot.writeMapEnd(); } @@ -105756,10 +107043,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1287 : 
struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1305 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1287.getKey()); - oprot.writeString(_iter1287.getValue()); + oprot.writeString(_iter1305.getKey()); + oprot.writeString(_iter1305.getValue()); } } } @@ -105783,15 +107070,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1288 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1288.size); - String _key1289; - String _val1290; - for (int _i1291 = 0; _i1291 < _map1288.size; ++_i1291) + org.apache.thrift.protocol.TMap _map1306 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1306.size); + String _key1307; + String _val1308; + for (int _i1309 = 0; _i1309 < _map1306.size; ++_i1309) { - _key1289 = iprot.readString(); - _val1290 = iprot.readString(); - struct.partitionSpecs.put(_key1289, _val1290); + _key1307 = iprot.readString(); + _val1308 = iprot.readString(); + struct.partitionSpecs.put(_key1307, _val1308); } } struct.setPartitionSpecsIsSet(true); @@ -107237,15 +108524,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1292 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1292.size); - String _key1293; - String _val1294; - for (int _i1295 = 0; _i1295 < _map1292.size; ++_i1295) + org.apache.thrift.protocol.TMap _map1310 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1310.size); + String _key1311; + String _val1312; + for (int _i1313 = 0; _i1313 < _map1310.size; ++_i1313) { - _key1293 = iprot.readString(); - _val1294 = iprot.readString(); - struct.partitionSpecs.put(_key1293, _val1294); + _key1311 = iprot.readString(); + _val1312 = iprot.readString(); + struct.partitionSpecs.put(_key1311, _val1312); } iprot.readMapEnd(); } @@ -107303,10 +108590,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1296 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1314 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1296.getKey()); - oprot.writeString(_iter1296.getValue()); + oprot.writeString(_iter1314.getKey()); + oprot.writeString(_iter1314.getValue()); } oprot.writeMapEnd(); } @@ -107369,10 +108656,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1297 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1315 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1297.getKey()); - oprot.writeString(_iter1297.getValue()); + oprot.writeString(_iter1315.getKey()); + oprot.writeString(_iter1315.getValue()); } } } @@ -107396,15 +108683,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = 
iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1298 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1298.size); - String _key1299; - String _val1300; - for (int _i1301 = 0; _i1301 < _map1298.size; ++_i1301) + org.apache.thrift.protocol.TMap _map1316 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1316.size); + String _key1317; + String _val1318; + for (int _i1319 = 0; _i1319 < _map1316.size; ++_i1319) { - _key1299 = iprot.readString(); - _val1300 = iprot.readString(); - struct.partitionSpecs.put(_key1299, _val1300); + _key1317 = iprot.readString(); + _val1318 = iprot.readString(); + struct.partitionSpecs.put(_key1317, _val1318); } } struct.setPartitionSpecsIsSet(true); @@ -108069,14 +109356,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1302 = iprot.readListBegin(); - struct.success = new ArrayList(_list1302.size); - Partition _elem1303; - for (int _i1304 = 0; _i1304 < _list1302.size; ++_i1304) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.success = new ArrayList(_list1320.size); + Partition _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1303 = new Partition(); - _elem1303.read(iprot); - struct.success.add(_elem1303); + _elem1321 = new Partition(); + _elem1321.read(iprot); + struct.success.add(_elem1321); } iprot.readListEnd(); } @@ -108138,9 +109425,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1305 : struct.success) + for (Partition _iter1323 : struct.success) { - _iter1305.write(oprot); + _iter1323.write(oprot); } oprot.writeListEnd(); } @@ -108203,9 +109490,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1306 : struct.success) + for (Partition _iter1324 : struct.success) { - _iter1306.write(oprot); + _iter1324.write(oprot); } } } @@ -108229,14 +109516,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1307.size); - Partition _elem1308; - for (int _i1309 = 0; _i1309 < _list1307.size; ++_i1309) + org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1325.size); + Partition _elem1326; + for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) { - _elem1308 = new Partition(); - _elem1308.read(iprot); - struct.success.add(_elem1308); + _elem1326 = new Partition(); + _elem1326.read(iprot); + struct.success.add(_elem1326); } } struct.setSuccessIsSet(true); @@ -108935,13 +110222,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1310 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1310.size); - String _elem1311; - for (int _i1312 = 0; _i1312 < _list1310.size; ++_i1312) + org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1328.size); + String _elem1329; + for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) { - _elem1311 = iprot.readString(); - struct.part_vals.add(_elem1311); + _elem1329 = iprot.readString(); + struct.part_vals.add(_elem1329); } iprot.readListEnd(); } @@ -108961,13 +110248,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1313 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1313.size); - String _elem1314; - for (int _i1315 = 0; _i1315 < _list1313.size; ++_i1315) + org.apache.thrift.protocol.TList _list1331 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1331.size); + String _elem1332; + for (int _i1333 = 0; _i1333 < _list1331.size; ++_i1333) { - _elem1314 = iprot.readString(); - struct.group_names.add(_elem1314); + _elem1332 = iprot.readString(); + struct.group_names.add(_elem1332); } iprot.readListEnd(); } @@ -109003,9 +110290,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1316 : struct.part_vals) + for (String _iter1334 : struct.part_vals) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1334); } oprot.writeListEnd(); } @@ -109020,9 +110307,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1317 : struct.group_names) + for (String _iter1335 : struct.group_names) { - oprot.writeString(_iter1317); + oprot.writeString(_iter1335); } oprot.writeListEnd(); } @@ -109071,9 +110358,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1318 : struct.part_vals) + for (String _iter1336 : struct.part_vals) { - oprot.writeString(_iter1318); + oprot.writeString(_iter1336); } } } @@ -109083,9 +110370,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1319 : struct.group_names) + for (String _iter1337 : struct.group_names) { - oprot.writeString(_iter1319); + oprot.writeString(_iter1337); } } } @@ -109105,13 +110392,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1320 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + 
org.apache.thrift.protocol.TList _list1338 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1338.size); + String _elem1339; + for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) { - _elem1321 = iprot.readString(); - struct.part_vals.add(_elem1321); + _elem1339 = iprot.readString(); + struct.part_vals.add(_elem1339); } } struct.setPart_valsIsSet(true); @@ -109122,13 +110409,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1323 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1323.size); - String _elem1324; - for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) + org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1341.size); + String _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1324 = iprot.readString(); - struct.group_names.add(_elem1324); + _elem1342 = iprot.readString(); + struct.group_names.add(_elem1342); } } struct.setGroup_namesIsSet(true); @@ -111897,14 +113184,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1326 = iprot.readListBegin(); - struct.success = new ArrayList(_list1326.size); - Partition _elem1327; - for (int _i1328 = 0; _i1328 < _list1326.size; ++_i1328) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.success = new ArrayList(_list1344.size); + Partition _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1327 = new Partition(); - _elem1327.read(iprot); - struct.success.add(_elem1327); + _elem1345 = new Partition(); + _elem1345.read(iprot); + struct.success.add(_elem1345); } iprot.readListEnd(); } @@ -111948,9 +113235,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1329 : struct.success) + for (Partition _iter1347 : struct.success) { - _iter1329.write(oprot); + _iter1347.write(oprot); } oprot.writeListEnd(); } @@ -111997,9 +113284,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1330 : struct.success) + for (Partition _iter1348 : struct.success) { - _iter1330.write(oprot); + _iter1348.write(oprot); } } } @@ -112017,14 +113304,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1331 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1331.size); - Partition _elem1332; - for (int _i1333 = 0; _i1333 < _list1331.size; ++_i1333) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1349.size); + Partition _elem1350; + 
for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1332 = new Partition(); - _elem1332.read(iprot); - struct.success.add(_elem1332); + _elem1350 = new Partition(); + _elem1350.read(iprot); + struct.success.add(_elem1350); } } struct.setSuccessIsSet(true); @@ -112714,13 +114001,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1334 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1334.size); - String _elem1335; - for (int _i1336 = 0; _i1336 < _list1334.size; ++_i1336) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1335 = iprot.readString(); - struct.group_names.add(_elem1335); + _elem1353 = iprot.readString(); + struct.group_names.add(_elem1353); } iprot.readListEnd(); } @@ -112764,9 +114051,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1337 : struct.group_names) + for (String _iter1355 : struct.group_names) { - oprot.writeString(_iter1337); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -112821,9 +114108,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1338 : struct.group_names) + for (String _iter1356 : struct.group_names) { - oprot.writeString(_iter1338); + oprot.writeString(_iter1356); } } } @@ -112851,13 +114138,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1339 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1339.size); - String _elem1340; - for (int _i1341 = 0; _i1341 < _list1339.size; ++_i1341) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1340 = iprot.readString(); - struct.group_names.add(_elem1340); + _elem1358 = iprot.readString(); + struct.group_names.add(_elem1358); } } struct.setGroup_namesIsSet(true); @@ -113344,14 +114631,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1342 = iprot.readListBegin(); - struct.success = new ArrayList(_list1342.size); - Partition _elem1343; - for (int _i1344 = 0; _i1344 < _list1342.size; ++_i1344) + org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); + struct.success = new ArrayList(_list1360.size); + Partition _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1343 = new Partition(); - _elem1343.read(iprot); - struct.success.add(_elem1343); + _elem1361 = new Partition(); + _elem1361.read(iprot); + struct.success.add(_elem1361); } iprot.readListEnd(); } @@ -113395,9 +114682,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1345 : struct.success) + for (Partition _iter1363 : struct.success) { - _iter1345.write(oprot); + _iter1363.write(oprot); } oprot.writeListEnd(); } @@ -113444,9 +114731,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1346 : struct.success) + for (Partition _iter1364 : struct.success) { - _iter1346.write(oprot); + _iter1364.write(oprot); } } } @@ -113464,14 +114751,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1347 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1347.size); - Partition _elem1348; - for (int _i1349 = 0; _i1349 < _list1347.size; ++_i1349) + org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1365.size); + Partition _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1348 = new Partition(); - _elem1348.read(iprot); - struct.success.add(_elem1348); + _elem1366 = new Partition(); + _elem1366.read(iprot); + struct.success.add(_elem1366); } } struct.setSuccessIsSet(true); @@ -114534,14 +115821,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1350 = iprot.readListBegin(); - struct.success = new ArrayList(_list1350.size); - PartitionSpec _elem1351; - for (int _i1352 = 0; _i1352 < _list1350.size; ++_i1352) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + PartitionSpec _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1351 = new PartitionSpec(); - _elem1351.read(iprot); - struct.success.add(_elem1351); + _elem1369 = new PartitionSpec(); + _elem1369.read(iprot); + struct.success.add(_elem1369); } iprot.readListEnd(); } @@ -114585,9 +115872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1353 : struct.success) + for (PartitionSpec _iter1371 : struct.success) { - _iter1353.write(oprot); + _iter1371.write(oprot); } oprot.writeListEnd(); } @@ -114634,9 +115921,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1354 : struct.success) + for (PartitionSpec _iter1372 : struct.success) { - _iter1354.write(oprot); + _iter1372.write(oprot); } } } @@ -114654,14 +115941,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1355 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1355.size); - PartitionSpec _elem1356; - for (int _i1357 = 0; _i1357 < _list1355.size; ++_i1357) + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + PartitionSpec _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1356 = new PartitionSpec(); - _elem1356.read(iprot); - struct.success.add(_elem1356); + _elem1374 = new PartitionSpec(); + _elem1374.read(iprot); + struct.success.add(_elem1374); } } struct.setSuccessIsSet(true); @@ -115721,13 +117008,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); - struct.success = new ArrayList(_list1358.size); - String _elem1359; - for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) + org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); + struct.success = new ArrayList(_list1376.size); + String _elem1377; + for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1359 = iprot.readString(); - struct.success.add(_elem1359); + _elem1377 = iprot.readString(); + struct.success.add(_elem1377); } iprot.readListEnd(); } @@ -115771,9 +117058,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1361 : struct.success) + for (String _iter1379 : struct.success) { - oprot.writeString(_iter1361); + oprot.writeString(_iter1379); } oprot.writeListEnd(); } @@ -115820,9 +117107,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1362 : struct.success) + for (String _iter1380 : struct.success) { - oprot.writeString(_iter1362); + oprot.writeString(_iter1380); } } } @@ -115840,13 +117127,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1363 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1363.size); - String _elem1364; - for (int _i1365 = 0; _i1365 < _list1363.size; ++_i1365) + org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1381.size); + String _elem1382; + for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) { - _elem1364 = iprot.readString(); - struct.success.add(_elem1364); + _elem1382 = iprot.readString(); + struct.success.add(_elem1382); } } struct.setSuccessIsSet(true); @@ -117377,13 +118664,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1366.size); - String _elem1367; - for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) + 
org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1384.size); + String _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1367 = iprot.readString(); - struct.part_vals.add(_elem1367); + _elem1385 = iprot.readString(); + struct.part_vals.add(_elem1385); } iprot.readListEnd(); } @@ -117427,9 +118714,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1369 : struct.part_vals) + for (String _iter1387 : struct.part_vals) { - oprot.writeString(_iter1369); + oprot.writeString(_iter1387); } oprot.writeListEnd(); } @@ -117478,9 +118765,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1370 : struct.part_vals) + for (String _iter1388 : struct.part_vals) { - oprot.writeString(_iter1370); + oprot.writeString(_iter1388); } } } @@ -117503,13 +118790,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1371.size); - String _elem1372; - for (int _i1373 = 0; _i1373 < _list1371.size; ++_i1373) + org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1389.size); + String _elem1390; + for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) { - _elem1372 = iprot.readString(); - struct.part_vals.add(_elem1372); + _elem1390 = iprot.readString(); + struct.part_vals.add(_elem1390); } } struct.setPart_valsIsSet(true); @@ -118000,14 +119287,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); - struct.success = new ArrayList(_list1374.size); - Partition _elem1375; - for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) + org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); + struct.success = new ArrayList(_list1392.size); + Partition _elem1393; + for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) { - _elem1375 = new Partition(); - _elem1375.read(iprot); - struct.success.add(_elem1375); + _elem1393 = new Partition(); + _elem1393.read(iprot); + struct.success.add(_elem1393); } iprot.readListEnd(); } @@ -118051,9 +119338,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1377 : struct.success) + for (Partition _iter1395 : struct.success) { - _iter1377.write(oprot); + _iter1395.write(oprot); } oprot.writeListEnd(); } @@ -118100,9 +119387,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1378 : struct.success) + for (Partition _iter1396 : struct.success) { - 
_iter1378.write(oprot); + _iter1396.write(oprot); } } } @@ -118120,14 +119407,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1379.size); - Partition _elem1380; - for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) + org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1397.size); + Partition _elem1398; + for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) { - _elem1380 = new Partition(); - _elem1380.read(iprot); - struct.success.add(_elem1380); + _elem1398 = new Partition(); + _elem1398.read(iprot); + struct.success.add(_elem1398); } } struct.setSuccessIsSet(true); @@ -118899,13 +120186,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1382.size); - String _elem1383; - for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) + org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1400.size); + String _elem1401; + for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) { - _elem1383 = iprot.readString(); - struct.part_vals.add(_elem1383); + _elem1401 = iprot.readString(); + struct.part_vals.add(_elem1401); } iprot.readListEnd(); } @@ -118933,13 +120220,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1385 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1385.size); - String _elem1386; - for (int _i1387 = 0; _i1387 < _list1385.size; ++_i1387) + org.apache.thrift.protocol.TList _list1403 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1403.size); + String _elem1404; + for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) { - _elem1386 = iprot.readString(); - struct.group_names.add(_elem1386); + _elem1404 = iprot.readString(); + struct.group_names.add(_elem1404); } iprot.readListEnd(); } @@ -118975,9 +120262,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1388 : struct.part_vals) + for (String _iter1406 : struct.part_vals) { - oprot.writeString(_iter1388); + oprot.writeString(_iter1406); } oprot.writeListEnd(); } @@ -118995,9 +120282,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1389 : struct.group_names) + for (String _iter1407 : struct.group_names) { - oprot.writeString(_iter1389); + oprot.writeString(_iter1407); } oprot.writeListEnd(); } @@ -119049,9 +120336,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1390 : struct.part_vals) + for (String _iter1408 : struct.part_vals) { - oprot.writeString(_iter1390); + oprot.writeString(_iter1408); } } } @@ -119064,9 +120351,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1391 : struct.group_names) + for (String _iter1409 : struct.group_names) { - oprot.writeString(_iter1391); + oprot.writeString(_iter1409); } } } @@ -119086,13 +120373,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1392 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1392.size); - String _elem1393; - for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) + org.apache.thrift.protocol.TList _list1410 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1410.size); + String _elem1411; + for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) { - _elem1393 = iprot.readString(); - struct.part_vals.add(_elem1393); + _elem1411 = iprot.readString(); + struct.part_vals.add(_elem1411); } } struct.setPart_valsIsSet(true); @@ -119107,13 +120394,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1395.size); - String _elem1396; - for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) + org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1413.size); + String _elem1414; + for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) { - _elem1396 = iprot.readString(); - struct.group_names.add(_elem1396); + _elem1414 = iprot.readString(); + struct.group_names.add(_elem1414); } } struct.setGroup_namesIsSet(true); @@ -119600,14 +120887,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.success = new ArrayList(_list1398.size); - Partition _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); + struct.success = new ArrayList(_list1416.size); + Partition _elem1417; + for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) { - _elem1399 = new Partition(); - _elem1399.read(iprot); - struct.success.add(_elem1399); + _elem1417 = new Partition(); + _elem1417.read(iprot); + struct.success.add(_elem1417); } iprot.readListEnd(); } @@ -119651,9 +120938,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1401 : struct.success) + for (Partition _iter1419 : struct.success) { - _iter1401.write(oprot); + _iter1419.write(oprot); } 
oprot.writeListEnd(); } @@ -119700,9 +120987,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1402 : struct.success) + for (Partition _iter1420 : struct.success) { - _iter1402.write(oprot); + _iter1420.write(oprot); } } } @@ -119720,14 +121007,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1403.size); - Partition _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1421.size); + Partition _elem1422; + for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) { - _elem1404 = new Partition(); - _elem1404.read(iprot); - struct.success.add(_elem1404); + _elem1422 = new Partition(); + _elem1422.read(iprot); + struct.success.add(_elem1422); } } struct.setSuccessIsSet(true); @@ -120320,13 +121607,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1406.size); - String _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1424.size); + String _elem1425; + for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) { - _elem1407 = iprot.readString(); - struct.part_vals.add(_elem1407); + _elem1425 = iprot.readString(); + struct.part_vals.add(_elem1425); } iprot.readListEnd(); } @@ -120370,9 +121657,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1409 : struct.part_vals) + for (String _iter1427 : struct.part_vals) { - oprot.writeString(_iter1409); + oprot.writeString(_iter1427); } oprot.writeListEnd(); } @@ -120421,9 +121708,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1410 : struct.part_vals) + for (String _iter1428 : struct.part_vals) { - oprot.writeString(_iter1410); + oprot.writeString(_iter1428); } } } @@ -120446,13 +121733,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1411.size); - String _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1429.size); + String _elem1430; + for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) { - _elem1412 = 
iprot.readString(); - struct.part_vals.add(_elem1412); + _elem1430 = iprot.readString(); + struct.part_vals.add(_elem1430); } } struct.setPart_valsIsSet(true); @@ -120940,13 +122227,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.success = new ArrayList(_list1414.size); - String _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + org.apache.thrift.protocol.TList _list1432 = iprot.readListBegin(); + struct.success = new ArrayList(_list1432.size); + String _elem1433; + for (int _i1434 = 0; _i1434 < _list1432.size; ++_i1434) { - _elem1415 = iprot.readString(); - struct.success.add(_elem1415); + _elem1433 = iprot.readString(); + struct.success.add(_elem1433); } iprot.readListEnd(); } @@ -120990,9 +122277,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1417 : struct.success) + for (String _iter1435 : struct.success) { - oprot.writeString(_iter1417); + oprot.writeString(_iter1435); } oprot.writeListEnd(); } @@ -121039,9 +122326,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1418 : struct.success) + for (String _iter1436 : struct.success) { - oprot.writeString(_iter1418); + oprot.writeString(_iter1436); } } } @@ -121059,13 +122346,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1419.size); - String _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1437 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1437.size); + String _elem1438; + for (int _i1439 = 0; _i1439 < _list1437.size; ++_i1439) { - _elem1420 = iprot.readString(); - struct.success.add(_elem1420); + _elem1438 = iprot.readString(); + struct.success.add(_elem1438); } } struct.setSuccessIsSet(true); @@ -122232,14 +123519,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); - struct.success = new ArrayList(_list1422.size); - Partition _elem1423; - for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) + org.apache.thrift.protocol.TList _list1440 = iprot.readListBegin(); + struct.success = new ArrayList(_list1440.size); + Partition _elem1441; + for (int _i1442 = 0; _i1442 < _list1440.size; ++_i1442) { - _elem1423 = new Partition(); - _elem1423.read(iprot); - struct.success.add(_elem1423); + _elem1441 = new Partition(); + _elem1441.read(iprot); + struct.success.add(_elem1441); } iprot.readListEnd(); } @@ -122283,9 +123570,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1425 : struct.success) + for (Partition _iter1443 : struct.success) { - _iter1425.write(oprot); + _iter1443.write(oprot); } oprot.writeListEnd(); } @@ -122332,9 +123619,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1426 : struct.success) + for (Partition _iter1444 : struct.success) { - _iter1426.write(oprot); + _iter1444.write(oprot); } } } @@ -122352,14 +123639,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1427.size); - Partition _elem1428; - for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) + org.apache.thrift.protocol.TList _list1445 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1445.size); + Partition _elem1446; + for (int _i1447 = 0; _i1447 < _list1445.size; ++_i1447) { - _elem1428 = new Partition(); - _elem1428.read(iprot); - struct.success.add(_elem1428); + _elem1446 = new Partition(); + _elem1446.read(iprot); + struct.success.add(_elem1446); } } struct.setSuccessIsSet(true); @@ -123526,14 +124813,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); - struct.success = new ArrayList(_list1430.size); - PartitionSpec _elem1431; - for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) + org.apache.thrift.protocol.TList _list1448 = iprot.readListBegin(); + struct.success = new ArrayList(_list1448.size); + PartitionSpec _elem1449; + for (int _i1450 = 0; _i1450 < _list1448.size; ++_i1450) { - _elem1431 = new PartitionSpec(); - _elem1431.read(iprot); - struct.success.add(_elem1431); + _elem1449 = new PartitionSpec(); + _elem1449.read(iprot); + struct.success.add(_elem1449); } iprot.readListEnd(); } @@ -123577,9 +124864,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1433 : struct.success) + for (PartitionSpec _iter1451 : struct.success) { - _iter1433.write(oprot); + _iter1451.write(oprot); } oprot.writeListEnd(); } @@ -123626,9 +124913,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1434 : struct.success) + for (PartitionSpec _iter1452 : struct.success) { - _iter1434.write(oprot); + _iter1452.write(oprot); } } } @@ -123646,14 +124933,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1435.size); - PartitionSpec 
_elem1436; - for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) + org.apache.thrift.protocol.TList _list1453 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1453.size); + PartitionSpec _elem1454; + for (int _i1455 = 0; _i1455 < _list1453.size; ++_i1455) { - _elem1436 = new PartitionSpec(); - _elem1436.read(iprot); - struct.success.add(_elem1436); + _elem1454 = new PartitionSpec(); + _elem1454.read(iprot); + struct.success.add(_elem1454); } } struct.setSuccessIsSet(true); @@ -126237,13 +127524,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); - struct.names = new ArrayList(_list1438.size); - String _elem1439; - for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) + org.apache.thrift.protocol.TList _list1456 = iprot.readListBegin(); + struct.names = new ArrayList(_list1456.size); + String _elem1457; + for (int _i1458 = 0; _i1458 < _list1456.size; ++_i1458) { - _elem1439 = iprot.readString(); - struct.names.add(_elem1439); + _elem1457 = iprot.readString(); + struct.names.add(_elem1457); } iprot.readListEnd(); } @@ -126279,9 +127566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1441 : struct.names) + for (String _iter1459 : struct.names) { - oprot.writeString(_iter1441); + oprot.writeString(_iter1459); } oprot.writeListEnd(); } @@ -126324,9 +127611,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1442 : struct.names) + for (String _iter1460 : struct.names) { - oprot.writeString(_iter1442); + oprot.writeString(_iter1460); } } } @@ -126346,13 +127633,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1443.size); - String _elem1444; - for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) + org.apache.thrift.protocol.TList _list1461 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1461.size); + String _elem1462; + for (int _i1463 = 0; _i1463 < _list1461.size; ++_i1463) { - _elem1444 = iprot.readString(); - struct.names.add(_elem1444); + _elem1462 = iprot.readString(); + struct.names.add(_elem1462); } } struct.setNamesIsSet(true); @@ -126839,14 +128126,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); - struct.success = new ArrayList(_list1446.size); - Partition _elem1447; - for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) + org.apache.thrift.protocol.TList _list1464 = iprot.readListBegin(); + struct.success = new ArrayList(_list1464.size); + Partition _elem1465; + for (int _i1466 = 0; _i1466 < _list1464.size; ++_i1466) { - _elem1447 = new Partition(); - 
_elem1447.read(iprot); - struct.success.add(_elem1447); + _elem1465 = new Partition(); + _elem1465.read(iprot); + struct.success.add(_elem1465); } iprot.readListEnd(); } @@ -126890,9 +128177,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1449 : struct.success) + for (Partition _iter1467 : struct.success) { - _iter1449.write(oprot); + _iter1467.write(oprot); } oprot.writeListEnd(); } @@ -126939,9 +128226,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1450 : struct.success) + for (Partition _iter1468 : struct.success) { - _iter1450.write(oprot); + _iter1468.write(oprot); } } } @@ -126959,14 +128246,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1451.size); - Partition _elem1452; - for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) + org.apache.thrift.protocol.TList _list1469 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1469.size); + Partition _elem1470; + for (int _i1471 = 0; _i1471 < _list1469.size; ++_i1471) { - _elem1452 = new Partition(); - _elem1452.read(iprot); - struct.success.add(_elem1452); + _elem1470 = new Partition(); + _elem1470.read(iprot); + struct.success.add(_elem1470); } } struct.setSuccessIsSet(true); @@ -129454,14 +130741,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1454.size); - Partition _elem1455; - for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) + org.apache.thrift.protocol.TList _list1472 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1472.size); + Partition _elem1473; + for (int _i1474 = 0; _i1474 < _list1472.size; ++_i1474) { - _elem1455 = new Partition(); - _elem1455.read(iprot); - struct.new_parts.add(_elem1455); + _elem1473 = new Partition(); + _elem1473.read(iprot); + struct.new_parts.add(_elem1473); } iprot.readListEnd(); } @@ -129497,9 +130784,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1457 : struct.new_parts) + for (Partition _iter1475 : struct.new_parts) { - _iter1457.write(oprot); + _iter1475.write(oprot); } oprot.writeListEnd(); } @@ -129542,9 +130829,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1458 : struct.new_parts) + for (Partition _iter1476 : struct.new_parts) { - _iter1458.write(oprot); + _iter1476.write(oprot); } } } @@ -129564,14 +130851,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1459.size); - Partition _elem1460; - for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) + org.apache.thrift.protocol.TList _list1477 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1477.size); + Partition _elem1478; + for (int _i1479 = 0; _i1479 < _list1477.size; ++_i1479) { - _elem1460 = new Partition(); - _elem1460.read(iprot); - struct.new_parts.add(_elem1460); + _elem1478 = new Partition(); + _elem1478.read(iprot); + struct.new_parts.add(_elem1478); } } struct.setNew_partsIsSet(true); @@ -130624,14 +131911,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1462.size); - Partition _elem1463; - for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) + org.apache.thrift.protocol.TList _list1480 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1480.size); + Partition _elem1481; + for (int _i1482 = 0; _i1482 < _list1480.size; ++_i1482) { - _elem1463 = new Partition(); - _elem1463.read(iprot); - struct.new_parts.add(_elem1463); + _elem1481 = new Partition(); + _elem1481.read(iprot); + struct.new_parts.add(_elem1481); } iprot.readListEnd(); } @@ -130676,9 +131963,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1465 : struct.new_parts) + for (Partition _iter1483 : struct.new_parts) { - _iter1465.write(oprot); + _iter1483.write(oprot); } oprot.writeListEnd(); } @@ -130729,9 +132016,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1466 : struct.new_parts) + for (Partition _iter1484 : struct.new_parts) { - _iter1466.write(oprot); + _iter1484.write(oprot); } } } @@ -130754,14 +132041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1467.size); - Partition _elem1468; - for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) + org.apache.thrift.protocol.TList _list1485 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1485.size); + Partition _elem1486; + for (int _i1487 = 0; _i1487 < _list1485.size; ++_i1487) { - _elem1468 = new Partition(); - _elem1468.read(iprot); - struct.new_parts.add(_elem1468); + _elem1486 = new Partition(); + _elem1486.read(iprot); + struct.new_parts.add(_elem1486); } } struct.setNew_partsIsSet(true); @@ -133900,13 +135187,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1470.size); - String _elem1471; - for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) + org.apache.thrift.protocol.TList _list1488 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1488.size); + String _elem1489; + for (int _i1490 = 0; _i1490 < _list1488.size; ++_i1490) { - _elem1471 = iprot.readString(); - struct.part_vals.add(_elem1471); + _elem1489 = iprot.readString(); + struct.part_vals.add(_elem1489); } iprot.readListEnd(); } @@ -133951,9 +135238,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1473 : struct.part_vals) + for (String _iter1491 : struct.part_vals) { - oprot.writeString(_iter1473); + oprot.writeString(_iter1491); } oprot.writeListEnd(); } @@ -134004,9 +135291,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1474 : struct.part_vals) + for (String _iter1492 : struct.part_vals) { - oprot.writeString(_iter1474); + oprot.writeString(_iter1492); } } } @@ -134029,13 +135316,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1475.size); - String _elem1476; - for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) + org.apache.thrift.protocol.TList _list1493 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1493.size); + String _elem1494; + for (int _i1495 = 0; _i1495 < _list1493.size; ++_i1495) { - _elem1476 = iprot.readString(); - struct.part_vals.add(_elem1476); + _elem1494 = iprot.readString(); + struct.part_vals.add(_elem1494); } } struct.setPart_valsIsSet(true); @@ -135847,13 +137134,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1478.size); - String _elem1479; - for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) + org.apache.thrift.protocol.TList _list1496 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1496.size); + String _elem1497; + for (int _i1498 = 0; _i1498 < _list1496.size; ++_i1498) { - _elem1479 = iprot.readString(); - struct.part_vals.add(_elem1479); + _elem1497 = iprot.readString(); + struct.part_vals.add(_elem1497); } iprot.readListEnd(); } @@ -135887,9 +137174,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1481 : struct.part_vals) + for (String _iter1499 : struct.part_vals) { - oprot.writeString(_iter1481); + oprot.writeString(_iter1499); } oprot.writeListEnd(); } @@ -135926,9 +137213,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1482 : struct.part_vals) + for (String _iter1500 : struct.part_vals) { - oprot.writeString(_iter1482); + oprot.writeString(_iter1500); } } } @@ -135943,13 +137230,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1483.size); - String _elem1484; - for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) + org.apache.thrift.protocol.TList _list1501 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1501.size); + String _elem1502; + for (int _i1503 = 0; _i1503 < _list1501.size; ++_i1503) { - _elem1484 = iprot.readString(); - struct.part_vals.add(_elem1484); + _elem1502 = iprot.readString(); + struct.part_vals.add(_elem1502); } } struct.setPart_valsIsSet(true); @@ -138104,13 +139391,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); - struct.success = new ArrayList(_list1486.size); - String _elem1487; - for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) + org.apache.thrift.protocol.TList _list1504 = iprot.readListBegin(); + struct.success = new ArrayList(_list1504.size); + String _elem1505; + for (int _i1506 = 0; _i1506 < _list1504.size; ++_i1506) { - _elem1487 = iprot.readString(); - struct.success.add(_elem1487); + _elem1505 = iprot.readString(); + struct.success.add(_elem1505); } iprot.readListEnd(); } @@ -138145,9 +139432,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1489 : struct.success) + for (String _iter1507 : struct.success) { - oprot.writeString(_iter1489); + oprot.writeString(_iter1507); } oprot.writeListEnd(); } @@ -138186,9 +139473,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1490 : struct.success) + for (String _iter1508 : struct.success) { - oprot.writeString(_iter1490); + oprot.writeString(_iter1508); } } } @@ -138203,13 +139490,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1491.size); - String _elem1492; - for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) + org.apache.thrift.protocol.TList _list1509 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1509.size); + String _elem1510; + for (int _i1511 = 0; _i1511 < _list1509.size; ++_i1511) { - _elem1492 = iprot.readString(); - struct.success.add(_elem1492); + _elem1510 = iprot.readString(); + 
struct.success.add(_elem1510); } } struct.setSuccessIsSet(true); @@ -138972,15 +140259,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1494 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1494.size); - String _key1495; - String _val1496; - for (int _i1497 = 0; _i1497 < _map1494.size; ++_i1497) + org.apache.thrift.protocol.TMap _map1512 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1512.size); + String _key1513; + String _val1514; + for (int _i1515 = 0; _i1515 < _map1512.size; ++_i1515) { - _key1495 = iprot.readString(); - _val1496 = iprot.readString(); - struct.success.put(_key1495, _val1496); + _key1513 = iprot.readString(); + _val1514 = iprot.readString(); + struct.success.put(_key1513, _val1514); } iprot.readMapEnd(); } @@ -139015,10 +140302,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1498 : struct.success.entrySet()) + for (Map.Entry _iter1516 : struct.success.entrySet()) { - oprot.writeString(_iter1498.getKey()); - oprot.writeString(_iter1498.getValue()); + oprot.writeString(_iter1516.getKey()); + oprot.writeString(_iter1516.getValue()); } oprot.writeMapEnd(); } @@ -139057,10 +140344,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1499 : struct.success.entrySet()) + for (Map.Entry _iter1517 : struct.success.entrySet()) { - oprot.writeString(_iter1499.getKey()); - oprot.writeString(_iter1499.getValue()); + oprot.writeString(_iter1517.getKey()); + oprot.writeString(_iter1517.getValue()); } } } @@ -139075,15 +140362,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1500 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1500.size); - String _key1501; - String _val1502; - for (int _i1503 = 0; _i1503 < _map1500.size; ++_i1503) + org.apache.thrift.protocol.TMap _map1518 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1518.size); + String _key1519; + String _val1520; + for (int _i1521 = 0; _i1521 < _map1518.size; ++_i1521) { - _key1501 = iprot.readString(); - _val1502 = iprot.readString(); - struct.success.put(_key1501, _val1502); + _key1519 = iprot.readString(); + _val1520 = iprot.readString(); + struct.success.put(_key1519, _val1520); } } struct.setSuccessIsSet(true); @@ -139678,15 +140965,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1504 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1504.size); - String _key1505; - String _val1506; - for (int _i1507 = 0; _i1507 < _map1504.size; ++_i1507) + org.apache.thrift.protocol.TMap _map1522 = 
iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1522.size); + String _key1523; + String _val1524; + for (int _i1525 = 0; _i1525 < _map1522.size; ++_i1525) { - _key1505 = iprot.readString(); - _val1506 = iprot.readString(); - struct.part_vals.put(_key1505, _val1506); + _key1523 = iprot.readString(); + _val1524 = iprot.readString(); + struct.part_vals.put(_key1523, _val1524); } iprot.readMapEnd(); } @@ -139730,10 +141017,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1508 : struct.part_vals.entrySet()) + for (Map.Entry _iter1526 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1508.getKey()); - oprot.writeString(_iter1508.getValue()); + oprot.writeString(_iter1526.getKey()); + oprot.writeString(_iter1526.getValue()); } oprot.writeMapEnd(); } @@ -139784,10 +141071,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1509 : struct.part_vals.entrySet()) + for (Map.Entry _iter1527 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1509.getKey()); - oprot.writeString(_iter1509.getValue()); + oprot.writeString(_iter1527.getKey()); + oprot.writeString(_iter1527.getValue()); } } } @@ -139810,15 +141097,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1510 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1510.size); - String _key1511; - String _val1512; - for (int _i1513 = 0; _i1513 < _map1510.size; ++_i1513) + org.apache.thrift.protocol.TMap _map1528 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1528.size); + String _key1529; + String _val1530; + for (int _i1531 = 0; _i1531 < _map1528.size; ++_i1531) { - _key1511 = iprot.readString(); - _val1512 = iprot.readString(); - struct.part_vals.put(_key1511, _val1512); + _key1529 = iprot.readString(); + _val1530 = iprot.readString(); + struct.part_vals.put(_key1529, _val1530); } } struct.setPart_valsIsSet(true); @@ -141302,15 +142589,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1514 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1514.size); - String _key1515; - String _val1516; - for (int _i1517 = 0; _i1517 < _map1514.size; ++_i1517) + org.apache.thrift.protocol.TMap _map1532 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1532.size); + String _key1533; + String _val1534; + for (int _i1535 = 0; _i1535 < _map1532.size; ++_i1535) { - _key1515 = iprot.readString(); - _val1516 = iprot.readString(); - struct.part_vals.put(_key1515, _val1516); + _key1533 = iprot.readString(); + _val1534 = iprot.readString(); + struct.part_vals.put(_key1533, _val1534); } iprot.readMapEnd(); } @@ -141354,10 +142641,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF 
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1518 : struct.part_vals.entrySet()) + for (Map.Entry _iter1536 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1518.getKey()); - oprot.writeString(_iter1518.getValue()); + oprot.writeString(_iter1536.getKey()); + oprot.writeString(_iter1536.getValue()); } oprot.writeMapEnd(); } @@ -141408,10 +142695,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1519 : struct.part_vals.entrySet()) + for (Map.Entry _iter1537 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1519.getKey()); - oprot.writeString(_iter1519.getValue()); + oprot.writeString(_iter1537.getKey()); + oprot.writeString(_iter1537.getValue()); } } } @@ -141434,15 +142721,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1520 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1520.size); - String _key1521; - String _val1522; - for (int _i1523 = 0; _i1523 < _map1520.size; ++_i1523) + org.apache.thrift.protocol.TMap _map1538 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1538.size); + String _key1539; + String _val1540; + for (int _i1541 = 0; _i1541 < _map1538.size; ++_i1541) { - _key1521 = iprot.readString(); - _val1522 = iprot.readString(); - struct.part_vals.put(_key1521, _val1522); + _key1539 = iprot.readString(); + _val1540 = iprot.readString(); + struct.part_vals.put(_key1539, _val1540); } } struct.setPart_valsIsSet(true); @@ -166098,13 +167385,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1524 = iprot.readListBegin(); - struct.success = new ArrayList(_list1524.size); - String _elem1525; - for (int _i1526 = 0; _i1526 < _list1524.size; ++_i1526) + org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); + struct.success = new ArrayList(_list1542.size); + String _elem1543; + for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) { - _elem1525 = iprot.readString(); - struct.success.add(_elem1525); + _elem1543 = iprot.readString(); + struct.success.add(_elem1543); } iprot.readListEnd(); } @@ -166139,9 +167426,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1527 : struct.success) + for (String _iter1545 : struct.success) { - oprot.writeString(_iter1527); + oprot.writeString(_iter1545); } oprot.writeListEnd(); } @@ -166180,9 +167467,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1528 : struct.success) + for (String _iter1546 : struct.success) { - oprot.writeString(_iter1528); + 
oprot.writeString(_iter1546); } } } @@ -166197,13 +167484,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1529.size); - String _elem1530; - for (int _i1531 = 0; _i1531 < _list1529.size; ++_i1531) + org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1547.size); + String _elem1548; + for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) { - _elem1530 = iprot.readString(); - struct.success.add(_elem1530); + _elem1548 = iprot.readString(); + struct.success.add(_elem1548); } } struct.setSuccessIsSet(true); @@ -170258,13 +171545,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1532 = iprot.readListBegin(); - struct.success = new ArrayList(_list1532.size); - String _elem1533; - for (int _i1534 = 0; _i1534 < _list1532.size; ++_i1534) + org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin(); + struct.success = new ArrayList(_list1550.size); + String _elem1551; + for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552) { - _elem1533 = iprot.readString(); - struct.success.add(_elem1533); + _elem1551 = iprot.readString(); + struct.success.add(_elem1551); } iprot.readListEnd(); } @@ -170299,9 +171586,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1535 : struct.success) + for (String _iter1553 : struct.success) { - oprot.writeString(_iter1535); + oprot.writeString(_iter1553); } oprot.writeListEnd(); } @@ -170340,9 +171627,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1536 : struct.success) + for (String _iter1554 : struct.success) { - oprot.writeString(_iter1536); + oprot.writeString(_iter1554); } } } @@ -170357,13 +171644,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1537.size); - String _elem1538; - for (int _i1539 = 0; _i1539 < _list1537.size; ++_i1539) + org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1555.size); + String _elem1556; + for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557) { - _elem1538 = iprot.readString(); - struct.success.add(_elem1538); + _elem1556 = iprot.readString(); + struct.success.add(_elem1556); } } struct.setSuccessIsSet(true); @@ -173654,14 +174941,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1540 = iprot.readListBegin(); - struct.success = new ArrayList(_list1540.size); - Role _elem1541; - for (int _i1542 = 0; _i1542 < _list1540.size; ++_i1542) + org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin(); + struct.success = new ArrayList(_list1558.size); + Role _elem1559; + for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560) { - _elem1541 = new Role(); - _elem1541.read(iprot); - struct.success.add(_elem1541); + _elem1559 = new Role(); + _elem1559.read(iprot); + struct.success.add(_elem1559); } iprot.readListEnd(); } @@ -173696,9 +174983,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1543 : struct.success) + for (Role _iter1561 : struct.success) { - _iter1543.write(oprot); + _iter1561.write(oprot); } oprot.writeListEnd(); } @@ -173737,9 +175024,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1544 : struct.success) + for (Role _iter1562 : struct.success) { - _iter1544.write(oprot); + _iter1562.write(oprot); } } } @@ -173754,14 +175041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1545.size); - Role _elem1546; - for (int _i1547 = 0; _i1547 < _list1545.size; ++_i1547) + org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1563.size); + Role _elem1564; + for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565) { - _elem1546 = new Role(); - _elem1546.read(iprot); - struct.success.add(_elem1546); + _elem1564 = new Role(); + _elem1564.read(iprot); + struct.success.add(_elem1564); } } struct.setSuccessIsSet(true); @@ -176766,13 +178053,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1548 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1548.size); - String _elem1549; - for (int _i1550 = 0; _i1550 < _list1548.size; ++_i1550) + org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1566.size); + String _elem1567; + for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568) { - _elem1549 = iprot.readString(); - struct.group_names.add(_elem1549); + _elem1567 = iprot.readString(); + struct.group_names.add(_elem1567); } iprot.readListEnd(); } @@ -176808,9 +178095,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1551 : struct.group_names) + for (String _iter1569 : struct.group_names) { - oprot.writeString(_iter1551); + oprot.writeString(_iter1569); } oprot.writeListEnd(); } @@ -176853,9 +178140,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1552 : struct.group_names) + for (String _iter1570 : struct.group_names) { - oprot.writeString(_iter1552); + oprot.writeString(_iter1570); } } } @@ -176876,13 +178163,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1553.size); - String _elem1554; - for (int _i1555 = 0; _i1555 < _list1553.size; ++_i1555) + org.apache.thrift.protocol.TList _list1571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1571.size); + String _elem1572; + for (int _i1573 = 0; _i1573 < _list1571.size; ++_i1573) { - _elem1554 = iprot.readString(); - struct.group_names.add(_elem1554); + _elem1572 = iprot.readString(); + struct.group_names.add(_elem1572); } } struct.setGroup_namesIsSet(true); @@ -178340,14 +179627,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1556 = iprot.readListBegin(); - struct.success = new ArrayList(_list1556.size); - HiveObjectPrivilege _elem1557; - for (int _i1558 = 0; _i1558 < _list1556.size; ++_i1558) + org.apache.thrift.protocol.TList _list1574 = iprot.readListBegin(); + struct.success = new ArrayList(_list1574.size); + HiveObjectPrivilege _elem1575; + for (int _i1576 = 0; _i1576 < _list1574.size; ++_i1576) { - _elem1557 = new HiveObjectPrivilege(); - _elem1557.read(iprot); - struct.success.add(_elem1557); + _elem1575 = new HiveObjectPrivilege(); + _elem1575.read(iprot); + struct.success.add(_elem1575); } iprot.readListEnd(); } @@ -178382,9 +179669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1559 : struct.success) + for (HiveObjectPrivilege _iter1577 : struct.success) { - _iter1559.write(oprot); + _iter1577.write(oprot); } oprot.writeListEnd(); } @@ -178423,9 +179710,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1560 : struct.success) + for (HiveObjectPrivilege _iter1578 : struct.success) { - _iter1560.write(oprot); + _iter1578.write(oprot); } } } @@ -178440,14 +179727,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1561.size); - HiveObjectPrivilege _elem1562; - for (int _i1563 = 0; _i1563 < _list1561.size; ++_i1563) + org.apache.thrift.protocol.TList _list1579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1579.size); + HiveObjectPrivilege _elem1580; + for (int _i1581 = 0; _i1581 < 
_list1579.size; ++_i1581) { - _elem1562 = new HiveObjectPrivilege(); - _elem1562.read(iprot); - struct.success.add(_elem1562); + _elem1580 = new HiveObjectPrivilege(); + _elem1580.read(iprot); + struct.success.add(_elem1580); } } struct.setSuccessIsSet(true); @@ -182394,13 +183681,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1564 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1564.size); - String _elem1565; - for (int _i1566 = 0; _i1566 < _list1564.size; ++_i1566) + org.apache.thrift.protocol.TList _list1582 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1582.size); + String _elem1583; + for (int _i1584 = 0; _i1584 < _list1582.size; ++_i1584) { - _elem1565 = iprot.readString(); - struct.group_names.add(_elem1565); + _elem1583 = iprot.readString(); + struct.group_names.add(_elem1583); } iprot.readListEnd(); } @@ -182431,9 +183718,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1567 : struct.group_names) + for (String _iter1585 : struct.group_names) { - oprot.writeString(_iter1567); + oprot.writeString(_iter1585); } oprot.writeListEnd(); } @@ -182470,9 +183757,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1568 : struct.group_names) + for (String _iter1586 : struct.group_names) { - oprot.writeString(_iter1568); + oprot.writeString(_iter1586); } } } @@ -182488,13 +183775,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1569.size); - String _elem1570; - for (int _i1571 = 0; _i1571 < _list1569.size; ++_i1571) + org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1587.size); + String _elem1588; + for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589) { - _elem1570 = iprot.readString(); - struct.group_names.add(_elem1570); + _elem1588 = iprot.readString(); + struct.group_names.add(_elem1588); } } struct.setGroup_namesIsSet(true); @@ -182897,13 +184184,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1572 = iprot.readListBegin(); - struct.success = new ArrayList(_list1572.size); - String _elem1573; - for (int _i1574 = 0; _i1574 < _list1572.size; ++_i1574) + org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin(); + struct.success = new ArrayList(_list1590.size); + String _elem1591; + for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592) { - _elem1573 = iprot.readString(); - struct.success.add(_elem1573); + _elem1591 = iprot.readString(); + struct.success.add(_elem1591); } iprot.readListEnd(); } @@ -182938,9 +184225,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1575 : struct.success) + for (String _iter1593 : struct.success) { - oprot.writeString(_iter1575); + oprot.writeString(_iter1593); } oprot.writeListEnd(); } @@ -182979,9 +184266,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1576 : struct.success) + for (String _iter1594 : struct.success) { - oprot.writeString(_iter1576); + oprot.writeString(_iter1594); } } } @@ -182996,13 +184283,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1577.size); - String _elem1578; - for (int _i1579 = 0; _i1579 < _list1577.size; ++_i1579) + org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1595.size); + String _elem1596; + for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597) { - _elem1578 = iprot.readString(); - struct.success.add(_elem1578); + _elem1596 = iprot.readString(); + struct.success.add(_elem1596); } } struct.setSuccessIsSet(true); @@ -188293,13 +189580,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1580 = iprot.readListBegin(); - struct.success = new ArrayList(_list1580.size); - String _elem1581; - for (int _i1582 = 0; _i1582 < _list1580.size; ++_i1582) + org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); + struct.success = new ArrayList(_list1598.size); + String _elem1599; + for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) { - _elem1581 = iprot.readString(); - struct.success.add(_elem1581); + _elem1599 = iprot.readString(); + struct.success.add(_elem1599); } iprot.readListEnd(); } @@ -188325,9 +189612,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1583 : struct.success) + for (String _iter1601 : struct.success) { - oprot.writeString(_iter1583); + oprot.writeString(_iter1601); } oprot.writeListEnd(); } @@ -188358,9 +189645,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1584 : struct.success) + for (String _iter1602 : struct.success) { - oprot.writeString(_iter1584); + oprot.writeString(_iter1602); } } } @@ -188372,13 +189659,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1585.size); - String _elem1586; - for (int _i1587 = 0; _i1587 < 
_list1585.size; ++_i1587) + org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1603.size); + String _elem1604; + for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) { - _elem1586 = iprot.readString(); - struct.success.add(_elem1586); + _elem1604 = iprot.readString(); + struct.success.add(_elem1604); } } struct.setSuccessIsSet(true); @@ -191408,13 +192695,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1588 = iprot.readListBegin(); - struct.success = new ArrayList(_list1588.size); - String _elem1589; - for (int _i1590 = 0; _i1590 < _list1588.size; ++_i1590) + org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); + struct.success = new ArrayList(_list1606.size); + String _elem1607; + for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) { - _elem1589 = iprot.readString(); - struct.success.add(_elem1589); + _elem1607 = iprot.readString(); + struct.success.add(_elem1607); } iprot.readListEnd(); } @@ -191440,9 +192727,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1591 : struct.success) + for (String _iter1609 : struct.success) { - oprot.writeString(_iter1591); + oprot.writeString(_iter1609); } oprot.writeListEnd(); } @@ -191473,9 +192760,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1592 : struct.success) + for (String _iter1610 : struct.success) { - oprot.writeString(_iter1592); + oprot.writeString(_iter1610); } } } @@ -191487,13 +192774,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1593.size); - String _elem1594; - for (int _i1595 = 0; _i1595 < _list1593.size; ++_i1595) + org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1611.size); + String _elem1612; + for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1594 = iprot.readString(); - struct.success.add(_elem1594); + _elem1612 = iprot.readString(); + struct.success.add(_elem1612); } } struct.setSuccessIsSet(true); @@ -208614,13 +209901,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1596 = iprot.readListBegin(); - struct.success = new ArrayList(_list1596.size); - String _elem1597; - for (int _i1598 = 0; _i1598 < _list1596.size; ++_i1598) + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.success = new ArrayList(_list1614.size); + String _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) { - _elem1597 = iprot.readString(); - struct.success.add(_elem1597); + 
_elem1615 = iprot.readString(); + struct.success.add(_elem1615); } iprot.readListEnd(); } @@ -208646,9 +209933,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1599 : struct.success) + for (String _iter1617 : struct.success) { - oprot.writeString(_iter1599); + oprot.writeString(_iter1617); } oprot.writeListEnd(); } @@ -208679,9 +209966,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1600 : struct.success) + for (String _iter1618 : struct.success) { - oprot.writeString(_iter1600); + oprot.writeString(_iter1618); } } } @@ -208693,13 +209980,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1601.size); - String _elem1602; - for (int _i1603 = 0; _i1603 < _list1601.size; ++_i1603) + org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1619.size); + String _elem1620; + for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) { - _elem1602 = iprot.readString(); - struct.success.add(_elem1602); + _elem1620 = iprot.readString(); + struct.success.add(_elem1620); } } struct.setSuccessIsSet(true); @@ -245585,14 +246872,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1604 = iprot.readListBegin(); - struct.success = new ArrayList(_list1604.size); - SchemaVersion _elem1605; - for (int _i1606 = 0; _i1606 < _list1604.size; ++_i1606) + org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); + struct.success = new ArrayList(_list1622.size); + SchemaVersion _elem1623; + for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) { - _elem1605 = new SchemaVersion(); - _elem1605.read(iprot); - struct.success.add(_elem1605); + _elem1623 = new SchemaVersion(); + _elem1623.read(iprot); + struct.success.add(_elem1623); } iprot.readListEnd(); } @@ -245636,9 +246923,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1607 : struct.success) + for (SchemaVersion _iter1625 : struct.success) { - _iter1607.write(oprot); + _iter1625.write(oprot); } oprot.writeListEnd(); } @@ -245685,9 +246972,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1608 : struct.success) + for (SchemaVersion _iter1626 : struct.success) { - _iter1608.write(oprot); + _iter1626.write(oprot); } } } @@ -245705,14 +246992,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1609.size); - SchemaVersion _elem1610; - for (int _i1611 = 0; _i1611 < _list1609.size; ++_i1611) + org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1627.size); + SchemaVersion _elem1628; + for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) { - _elem1610 = new SchemaVersion(); - _elem1610.read(iprot); - struct.success.add(_elem1610); + _elem1628 = new SchemaVersion(); + _elem1628.read(iprot); + struct.success.add(_elem1628); } } struct.setSuccessIsSet(true); @@ -254255,14 +255542,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1612 = iprot.readListBegin(); - struct.success = new ArrayList(_list1612.size); - RuntimeStat _elem1613; - for (int _i1614 = 0; _i1614 < _list1612.size; ++_i1614) + org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); + struct.success = new ArrayList(_list1630.size); + RuntimeStat _elem1631; + for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) { - _elem1613 = new RuntimeStat(); - _elem1613.read(iprot); - struct.success.add(_elem1613); + _elem1631 = new RuntimeStat(); + _elem1631.read(iprot); + struct.success.add(_elem1631); } iprot.readListEnd(); } @@ -254297,9 +255584,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1615 : struct.success) + for (RuntimeStat _iter1633 : struct.success) { - _iter1615.write(oprot); + _iter1633.write(oprot); } oprot.writeListEnd(); } @@ -254338,9 +255625,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1616 : struct.success) + for (RuntimeStat _iter1634 : struct.success) { - _iter1616.write(oprot); + _iter1634.write(oprot); } } } @@ -254355,14 +255642,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1617 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1617.size); - RuntimeStat _elem1618; - for (int _i1619 = 0; _i1619 < _list1617.size; ++_i1619) + org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1635.size); + RuntimeStat _elem1636; + for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) { - _elem1618 = new RuntimeStat(); - _elem1618.read(iprot); - struct.success.add(_elem1618); + _elem1636 = new RuntimeStat(); + _elem1636.read(iprot); + struct.success.add(_elem1636); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index f41a02b3cb..ab965299ef 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -547,6 +547,14 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\NoSuchObjectException */ public function get_partition($db_name, $tbl_name, array $part_vals); + /** + * @param array $partitionsMap + * @param int $last_accesstime + * @return bool + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function update_last_access_time(array $partitionsMap, $last_accesstime); /** * @param array $partitionSpecs * @param string $source_db @@ -5524,6 +5532,64 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition failed: unknown result"); } + public function update_last_access_time(array $partitionsMap, $last_accesstime) + { + $this->send_update_last_access_time($partitionsMap, $last_accesstime); + return $this->recv_update_last_access_time(); + } + + public function send_update_last_access_time(array $partitionsMap, $last_accesstime) + { + $args = new \metastore\ThriftHiveMetastore_update_last_access_time_args(); + $args->partitionsMap = $partitionsMap; + $args->last_accesstime = $last_accesstime; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'update_last_access_time', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('update_last_access_time', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_update_last_access_time() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_last_access_time_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_update_last_access_time_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("update_last_access_time failed: unknown result"); + } + public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name) { $this->send_exchange_partition($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name); @@ -30228,6 +30294,294 @@ class ThriftHiveMetastore_get_partition_result { } +class ThriftHiveMetastore_update_last_access_time_args { + static $_TSPEC; + + /** + * @var array + */ + public $partitionsMap = null; + /** + * @var int + */ + public $last_accesstime = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'partitionsMap', + 'type' 
=> TType::MAP, + 'ktype' => TType::STRING, + 'vtype' => TType::SET, + 'key' => array( + 'type' => TType::STRING, + ), + 'val' => array( + 'type' => TType::SET, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + ), + 2 => array( + 'var' => 'last_accesstime', + 'type' => TType::I32, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['partitionsMap'])) { + $this->partitionsMap = $vals['partitionsMap']; + } + if (isset($vals['last_accesstime'])) { + $this->last_accesstime = $vals['last_accesstime']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_last_access_time_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::MAP) { + $this->partitionsMap = array(); + $_size1129 = 0; + $_ktype1130 = 0; + $_vtype1131 = 0; + $xfer += $input->readMapBegin($_ktype1130, $_vtype1131, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + { + $key1134 = ''; + $val1135 = array(); + $xfer += $input->readString($key1134); + $val1135 = array(); + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readSetBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + { + $elem1141 = null; + $xfer += $input->readString($elem1141); + if (is_scalar($elem1141)) { + $val1135[$elem1141] = true; + } else { + $val1135 []= $elem1141; + } + } + $xfer += $input->readSetEnd(); + $this->partitionsMap[$key1134] = $val1135; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->last_accesstime); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_last_access_time_args'); + if ($this->partitionsMap !== null) { + if (!is_array($this->partitionsMap)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('partitionsMap', TType::MAP, 1); + { + $output->writeMapBegin(TType::STRING, TType::SET, count($this->partitionsMap)); + { + foreach ($this->partitionsMap as $kiter1142 => $viter1143) + { + $xfer += $output->writeString($kiter1142); + { + $output->writeSetBegin(TType::STRING, count($viter1143)); + { + foreach ($viter1143 as $iter1144 => $iter1145) + { + if (is_scalar($iter1145)) { + $xfer += $output->writeString($iter1144); + } else { + $xfer += $output->writeString($iter1145); + } + } + } + $output->writeSetEnd(); + } + } + } + $output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->last_accesstime !== null) { + $xfer += $output->writeFieldBegin('last_accesstime', TType::I32, 2); + $xfer += $output->writeI32($this->last_accesstime); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_update_last_access_time_result { + static $_TSPEC; + + /** + * @var bool + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + 
/** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::BOOL, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_update_last_access_time_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_last_access_time_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::BOOL, 0); + $xfer += $output->writeBool($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_exchange_partition_args { static $_TSPEC; @@ -30326,17 +30680,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1129 = 0; - $_ktype1130 = 0; - $_vtype1131 = 0; - $xfer += $input->readMapBegin($_ktype1130, $_vtype1131, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1146 = 0; + $_ktype1147 = 0; + $_vtype1148 = 0; + $xfer += $input->readMapBegin($_ktype1147, $_vtype1148, $_size1146); + for ($_i1150 = 0; $_i1150 < $_size1146; ++$_i1150) { - $key1134 = ''; - $val1135 = ''; - $xfer += $input->readString($key1134); - $xfer += $input->readString($val1135); - $this->partitionSpecs[$key1134] = $val1135; + $key1151 = ''; + $val1152 = ''; + $xfer += $input->readString($key1151); + $xfer += $input->readString($val1152); + $this->partitionSpecs[$key1151] = $val1152; } $xfer += $input->readMapEnd(); } else { @@ -30392,10 +30746,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, 
TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1136 => $viter1137) + foreach ($this->partitionSpecs as $kiter1153 => $viter1154) { - $xfer += $output->writeString($kiter1136); - $xfer += $output->writeString($viter1137); + $xfer += $output->writeString($kiter1153); + $xfer += $output->writeString($viter1154); } } $output->writeMapEnd(); @@ -30707,17 +31061,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1138 = 0; - $_ktype1139 = 0; - $_vtype1140 = 0; - $xfer += $input->readMapBegin($_ktype1139, $_vtype1140, $_size1138); - for ($_i1142 = 0; $_i1142 < $_size1138; ++$_i1142) + $_size1155 = 0; + $_ktype1156 = 0; + $_vtype1157 = 0; + $xfer += $input->readMapBegin($_ktype1156, $_vtype1157, $_size1155); + for ($_i1159 = 0; $_i1159 < $_size1155; ++$_i1159) { - $key1143 = ''; - $val1144 = ''; - $xfer += $input->readString($key1143); - $xfer += $input->readString($val1144); - $this->partitionSpecs[$key1143] = $val1144; + $key1160 = ''; + $val1161 = ''; + $xfer += $input->readString($key1160); + $xfer += $input->readString($val1161); + $this->partitionSpecs[$key1160] = $val1161; } $xfer += $input->readMapEnd(); } else { @@ -30773,10 +31127,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1145 => $viter1146) + foreach ($this->partitionSpecs as $kiter1162 => $viter1163) { - $xfer += $output->writeString($kiter1145); - $xfer += $output->writeString($viter1146); + $xfer += $output->writeString($kiter1162); + $xfer += $output->writeString($viter1163); } } $output->writeMapEnd(); @@ -30909,15 +31263,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1147 = 0; - $_etype1150 = 0; - $xfer += $input->readListBegin($_etype1150, $_size1147); - for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1152 = null; - $elem1152 = new \metastore\Partition(); - $xfer += $elem1152->read($input); - $this->success []= $elem1152; + $elem1169 = null; + $elem1169 = new \metastore\Partition(); + $xfer += $elem1169->read($input); + $this->success []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -30977,9 +31331,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1153) + foreach ($this->success as $iter1170) { - $xfer += $iter1153->write($output); + $xfer += $iter1170->write($output); } } $output->writeListEnd(); @@ -31125,14 +31479,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1154 = 0; - $_etype1157 = 0; - $xfer += $input->readListBegin($_etype1157, $_size1154); - for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1159 = null; - $xfer += $input->readString($elem1159); - $this->part_vals []= $elem1159; + $elem1176 = null; + $xfer += $input->readString($elem1176); + $this->part_vals []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -31149,14 +31503,14 @@ class 
ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1160 = 0; - $_etype1163 = 0; - $xfer += $input->readListBegin($_etype1163, $_size1160); - for ($_i1164 = 0; $_i1164 < $_size1160; ++$_i1164) + $_size1177 = 0; + $_etype1180 = 0; + $xfer += $input->readListBegin($_etype1180, $_size1177); + for ($_i1181 = 0; $_i1181 < $_size1177; ++$_i1181) { - $elem1165 = null; - $xfer += $input->readString($elem1165); - $this->group_names []= $elem1165; + $elem1182 = null; + $xfer += $input->readString($elem1182); + $this->group_names []= $elem1182; } $xfer += $input->readListEnd(); } else { @@ -31194,9 +31548,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1166) + foreach ($this->part_vals as $iter1183) { - $xfer += $output->writeString($iter1166); + $xfer += $output->writeString($iter1183); } } $output->writeListEnd(); @@ -31216,9 +31570,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1167) + foreach ($this->group_names as $iter1184) { - $xfer += $output->writeString($iter1167); + $xfer += $output->writeString($iter1184); } } $output->writeListEnd(); @@ -31809,15 +32163,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1168 = 0; - $_etype1171 = 0; - $xfer += $input->readListBegin($_etype1171, $_size1168); - for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1173 = null; - $elem1173 = new \metastore\Partition(); - $xfer += $elem1173->read($input); - $this->success []= $elem1173; + $elem1190 = null; + $elem1190 = new \metastore\Partition(); + $xfer += $elem1190->read($input); + $this->success []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -31861,9 +32215,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1174) + foreach ($this->success as $iter1191) { - $xfer += $iter1174->write($output); + $xfer += $iter1191->write($output); } } $output->writeListEnd(); @@ -32009,14 +32363,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1175 = 0; - $_etype1178 = 0; - $xfer += $input->readListBegin($_etype1178, $_size1175); - for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179) + $_size1192 = 0; + $_etype1195 = 0; + $xfer += $input->readListBegin($_etype1195, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $elem1180 = null; - $xfer += $input->readString($elem1180); - $this->group_names []= $elem1180; + $elem1197 = null; + $xfer += $input->readString($elem1197); + $this->group_names []= $elem1197; } $xfer += $input->readListEnd(); } else { @@ -32064,9 +32418,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1181) + foreach ($this->group_names as $iter1198) { - $xfer += $output->writeString($iter1181); + $xfer += $output->writeString($iter1198); } } $output->writeListEnd(); @@ -32155,15 +32509,15 @@ class 
ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1182 = 0; - $_etype1185 = 0; - $xfer += $input->readListBegin($_etype1185, $_size1182); - for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186) + $_size1199 = 0; + $_etype1202 = 0; + $xfer += $input->readListBegin($_etype1202, $_size1199); + for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) { - $elem1187 = null; - $elem1187 = new \metastore\Partition(); - $xfer += $elem1187->read($input); - $this->success []= $elem1187; + $elem1204 = null; + $elem1204 = new \metastore\Partition(); + $xfer += $elem1204->read($input); + $this->success []= $elem1204; } $xfer += $input->readListEnd(); } else { @@ -32207,9 +32561,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1188) + foreach ($this->success as $iter1205) { - $xfer += $iter1188->write($output); + $xfer += $iter1205->write($output); } } $output->writeListEnd(); @@ -32429,15 +32783,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1189 = 0; - $_etype1192 = 0; - $xfer += $input->readListBegin($_etype1192, $_size1189); - for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193) + $_size1206 = 0; + $_etype1209 = 0; + $xfer += $input->readListBegin($_etype1209, $_size1206); + for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) { - $elem1194 = null; - $elem1194 = new \metastore\PartitionSpec(); - $xfer += $elem1194->read($input); - $this->success []= $elem1194; + $elem1211 = null; + $elem1211 = new \metastore\PartitionSpec(); + $xfer += $elem1211->read($input); + $this->success []= $elem1211; } $xfer += $input->readListEnd(); } else { @@ -32481,9 +32835,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1195) + foreach ($this->success as $iter1212) { - $xfer += $iter1195->write($output); + $xfer += $iter1212->write($output); } } $output->writeListEnd(); @@ -32702,14 +33056,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1196 = 0; - $_etype1199 = 0; - $xfer += $input->readListBegin($_etype1199, $_size1196); - for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) + $_size1213 = 0; + $_etype1216 = 0; + $xfer += $input->readListBegin($_etype1216, $_size1213); + for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) { - $elem1201 = null; - $xfer += $input->readString($elem1201); - $this->success []= $elem1201; + $elem1218 = null; + $xfer += $input->readString($elem1218); + $this->success []= $elem1218; } $xfer += $input->readListEnd(); } else { @@ -32753,9 +33107,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1202) + foreach ($this->success as $iter1219) { - $xfer += $output->writeString($iter1202); + $xfer += $output->writeString($iter1219); } } $output->writeListEnd(); @@ -33086,14 +33440,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1203 = 0; - $_etype1206 = 0; - $xfer += $input->readListBegin($_etype1206, $_size1203); - for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) + $_size1220 = 0; + $_etype1223 = 0; + $xfer += $input->readListBegin($_etype1223, $_size1220); + 
for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) { - $elem1208 = null; - $xfer += $input->readString($elem1208); - $this->part_vals []= $elem1208; + $elem1225 = null; + $xfer += $input->readString($elem1225); + $this->part_vals []= $elem1225; } $xfer += $input->readListEnd(); } else { @@ -33138,9 +33492,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1209) + foreach ($this->part_vals as $iter1226) { - $xfer += $output->writeString($iter1209); + $xfer += $output->writeString($iter1226); } } $output->writeListEnd(); @@ -33234,15 +33588,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1210 = 0; - $_etype1213 = 0; - $xfer += $input->readListBegin($_etype1213, $_size1210); - for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) + $_size1227 = 0; + $_etype1230 = 0; + $xfer += $input->readListBegin($_etype1230, $_size1227); + for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) { - $elem1215 = null; - $elem1215 = new \metastore\Partition(); - $xfer += $elem1215->read($input); - $this->success []= $elem1215; + $elem1232 = null; + $elem1232 = new \metastore\Partition(); + $xfer += $elem1232->read($input); + $this->success []= $elem1232; } $xfer += $input->readListEnd(); } else { @@ -33286,9 +33640,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1216) + foreach ($this->success as $iter1233) { - $xfer += $iter1216->write($output); + $xfer += $iter1233->write($output); } } $output->writeListEnd(); @@ -33435,14 +33789,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1217 = 0; - $_etype1220 = 0; - $xfer += $input->readListBegin($_etype1220, $_size1217); - for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221) + $_size1234 = 0; + $_etype1237 = 0; + $xfer += $input->readListBegin($_etype1237, $_size1234); + for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) { - $elem1222 = null; - $xfer += $input->readString($elem1222); - $this->part_vals []= $elem1222; + $elem1239 = null; + $xfer += $input->readString($elem1239); + $this->part_vals []= $elem1239; } $xfer += $input->readListEnd(); } else { @@ -33466,14 +33820,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1223 = 0; - $_etype1226 = 0; - $xfer += $input->readListBegin($_etype1226, $_size1223); - for ($_i1227 = 0; $_i1227 < $_size1223; ++$_i1227) + $_size1240 = 0; + $_etype1243 = 0; + $xfer += $input->readListBegin($_etype1243, $_size1240); + for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) { - $elem1228 = null; - $xfer += $input->readString($elem1228); - $this->group_names []= $elem1228; + $elem1245 = null; + $xfer += $input->readString($elem1245); + $this->group_names []= $elem1245; } $xfer += $input->readListEnd(); } else { @@ -33511,9 +33865,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1229) + foreach ($this->part_vals as $iter1246) { - $xfer += $output->writeString($iter1229); + $xfer += $output->writeString($iter1246); } } $output->writeListEnd(); @@ -33538,9 +33892,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { 
$output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1230) + foreach ($this->group_names as $iter1247) { - $xfer += $output->writeString($iter1230); + $xfer += $output->writeString($iter1247); } } $output->writeListEnd(); @@ -33629,15 +33983,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1231 = 0; - $_etype1234 = 0; - $xfer += $input->readListBegin($_etype1234, $_size1231); - for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) + $_size1248 = 0; + $_etype1251 = 0; + $xfer += $input->readListBegin($_etype1251, $_size1248); + for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) { - $elem1236 = null; - $elem1236 = new \metastore\Partition(); - $xfer += $elem1236->read($input); - $this->success []= $elem1236; + $elem1253 = null; + $elem1253 = new \metastore\Partition(); + $xfer += $elem1253->read($input); + $this->success []= $elem1253; } $xfer += $input->readListEnd(); } else { @@ -33681,9 +34035,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1237) + foreach ($this->success as $iter1254) { - $xfer += $iter1237->write($output); + $xfer += $iter1254->write($output); } } $output->writeListEnd(); @@ -33804,14 +34158,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1238 = 0; - $_etype1241 = 0; - $xfer += $input->readListBegin($_etype1241, $_size1238); - for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) + $_size1255 = 0; + $_etype1258 = 0; + $xfer += $input->readListBegin($_etype1258, $_size1255); + for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) { - $elem1243 = null; - $xfer += $input->readString($elem1243); - $this->part_vals []= $elem1243; + $elem1260 = null; + $xfer += $input->readString($elem1260); + $this->part_vals []= $elem1260; } $xfer += $input->readListEnd(); } else { @@ -33856,9 +34210,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1244) + foreach ($this->part_vals as $iter1261) { - $xfer += $output->writeString($iter1244); + $xfer += $output->writeString($iter1261); } } $output->writeListEnd(); @@ -33951,14 +34305,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1245 = 0; - $_etype1248 = 0; - $xfer += $input->readListBegin($_etype1248, $_size1245); - for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) + $_size1262 = 0; + $_etype1265 = 0; + $xfer += $input->readListBegin($_etype1265, $_size1262); + for ($_i1266 = 0; $_i1266 < $_size1262; ++$_i1266) { - $elem1250 = null; - $xfer += $input->readString($elem1250); - $this->success []= $elem1250; + $elem1267 = null; + $xfer += $input->readString($elem1267); + $this->success []= $elem1267; } $xfer += $input->readListEnd(); } else { @@ -34002,9 +34356,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1251) + foreach ($this->success as $iter1268) { - $xfer += $output->writeString($iter1251); + $xfer += $output->writeString($iter1268); } } $output->writeListEnd(); @@ -34247,15 +34601,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { 
$this->success = array(); - $_size1252 = 0; - $_etype1255 = 0; - $xfer += $input->readListBegin($_etype1255, $_size1252); - for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) + $_size1269 = 0; + $_etype1272 = 0; + $xfer += $input->readListBegin($_etype1272, $_size1269); + for ($_i1273 = 0; $_i1273 < $_size1269; ++$_i1273) { - $elem1257 = null; - $elem1257 = new \metastore\Partition(); - $xfer += $elem1257->read($input); - $this->success []= $elem1257; + $elem1274 = null; + $elem1274 = new \metastore\Partition(); + $xfer += $elem1274->read($input); + $this->success []= $elem1274; } $xfer += $input->readListEnd(); } else { @@ -34299,9 +34653,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1258) + foreach ($this->success as $iter1275) { - $xfer += $iter1258->write($output); + $xfer += $iter1275->write($output); } } $output->writeListEnd(); @@ -34544,15 +34898,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1259 = 0; - $_etype1262 = 0; - $xfer += $input->readListBegin($_etype1262, $_size1259); - for ($_i1263 = 0; $_i1263 < $_size1259; ++$_i1263) + $_size1276 = 0; + $_etype1279 = 0; + $xfer += $input->readListBegin($_etype1279, $_size1276); + for ($_i1280 = 0; $_i1280 < $_size1276; ++$_i1280) { - $elem1264 = null; - $elem1264 = new \metastore\PartitionSpec(); - $xfer += $elem1264->read($input); - $this->success []= $elem1264; + $elem1281 = null; + $elem1281 = new \metastore\PartitionSpec(); + $xfer += $elem1281->read($input); + $this->success []= $elem1281; } $xfer += $input->readListEnd(); } else { @@ -34596,9 +34950,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1265) + foreach ($this->success as $iter1282) { - $xfer += $iter1265->write($output); + $xfer += $iter1282->write($output); } } $output->writeListEnd(); @@ -35164,14 +35518,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1266 = 0; - $_etype1269 = 0; - $xfer += $input->readListBegin($_etype1269, $_size1266); - for ($_i1270 = 0; $_i1270 < $_size1266; ++$_i1270) + $_size1283 = 0; + $_etype1286 = 0; + $xfer += $input->readListBegin($_etype1286, $_size1283); + for ($_i1287 = 0; $_i1287 < $_size1283; ++$_i1287) { - $elem1271 = null; - $xfer += $input->readString($elem1271); - $this->names []= $elem1271; + $elem1288 = null; + $xfer += $input->readString($elem1288); + $this->names []= $elem1288; } $xfer += $input->readListEnd(); } else { @@ -35209,9 +35563,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1272) + foreach ($this->names as $iter1289) { - $xfer += $output->writeString($iter1272); + $xfer += $output->writeString($iter1289); } } $output->writeListEnd(); @@ -35300,15 +35654,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1273 = 0; - $_etype1276 = 0; - $xfer += $input->readListBegin($_etype1276, $_size1273); - for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) + $_size1290 = 0; + $_etype1293 = 0; + $xfer += $input->readListBegin($_etype1293, $_size1290); + for ($_i1294 = 0; $_i1294 < $_size1290; ++$_i1294) { - $elem1278 = null; - $elem1278 
= new \metastore\Partition(); - $xfer += $elem1278->read($input); - $this->success []= $elem1278; + $elem1295 = null; + $elem1295 = new \metastore\Partition(); + $xfer += $elem1295->read($input); + $this->success []= $elem1295; } $xfer += $input->readListEnd(); } else { @@ -35352,9 +35706,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1279) + foreach ($this->success as $iter1296) { - $xfer += $iter1279->write($output); + $xfer += $iter1296->write($output); } } $output->writeListEnd(); @@ -35903,15 +36257,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1280 = 0; - $_etype1283 = 0; - $xfer += $input->readListBegin($_etype1283, $_size1280); - for ($_i1284 = 0; $_i1284 < $_size1280; ++$_i1284) + $_size1297 = 0; + $_etype1300 = 0; + $xfer += $input->readListBegin($_etype1300, $_size1297); + for ($_i1301 = 0; $_i1301 < $_size1297; ++$_i1301) { - $elem1285 = null; - $elem1285 = new \metastore\Partition(); - $xfer += $elem1285->read($input); - $this->new_parts []= $elem1285; + $elem1302 = null; + $elem1302 = new \metastore\Partition(); + $xfer += $elem1302->read($input); + $this->new_parts []= $elem1302; } $xfer += $input->readListEnd(); } else { @@ -35949,9 +36303,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1286) + foreach ($this->new_parts as $iter1303) { - $xfer += $iter1286->write($output); + $xfer += $iter1303->write($output); } } $output->writeListEnd(); @@ -36166,15 +36520,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1287 = 0; - $_etype1290 = 0; - $xfer += $input->readListBegin($_etype1290, $_size1287); - for ($_i1291 = 0; $_i1291 < $_size1287; ++$_i1291) + $_size1304 = 0; + $_etype1307 = 0; + $xfer += $input->readListBegin($_etype1307, $_size1304); + for ($_i1308 = 0; $_i1308 < $_size1304; ++$_i1308) { - $elem1292 = null; - $elem1292 = new \metastore\Partition(); - $xfer += $elem1292->read($input); - $this->new_parts []= $elem1292; + $elem1309 = null; + $elem1309 = new \metastore\Partition(); + $xfer += $elem1309->read($input); + $this->new_parts []= $elem1309; } $xfer += $input->readListEnd(); } else { @@ -36220,9 +36574,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1293) + foreach ($this->new_parts as $iter1310) { - $xfer += $iter1293->write($output); + $xfer += $iter1310->write($output); } } $output->writeListEnd(); @@ -36910,14 +37264,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1294 = 0; - $_etype1297 = 0; - $xfer += $input->readListBegin($_etype1297, $_size1294); - for ($_i1298 = 0; $_i1298 < $_size1294; ++$_i1298) + $_size1311 = 0; + $_etype1314 = 0; + $xfer += $input->readListBegin($_etype1314, $_size1311); + for ($_i1315 = 0; $_i1315 < $_size1311; ++$_i1315) { - $elem1299 = null; - $xfer += $input->readString($elem1299); - $this->part_vals []= $elem1299; + $elem1316 = null; + $xfer += $input->readString($elem1316); + $this->part_vals []= $elem1316; } $xfer += $input->readListEnd(); } else { @@ -36963,9 +37317,9 @@ class 
ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1300) + foreach ($this->part_vals as $iter1317) { - $xfer += $output->writeString($iter1300); + $xfer += $output->writeString($iter1317); } } $output->writeListEnd(); @@ -37360,14 +37714,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1301 = 0; - $_etype1304 = 0; - $xfer += $input->readListBegin($_etype1304, $_size1301); - for ($_i1305 = 0; $_i1305 < $_size1301; ++$_i1305) + $_size1318 = 0; + $_etype1321 = 0; + $xfer += $input->readListBegin($_etype1321, $_size1318); + for ($_i1322 = 0; $_i1322 < $_size1318; ++$_i1322) { - $elem1306 = null; - $xfer += $input->readString($elem1306); - $this->part_vals []= $elem1306; + $elem1323 = null; + $xfer += $input->readString($elem1323); + $this->part_vals []= $elem1323; } $xfer += $input->readListEnd(); } else { @@ -37402,9 +37756,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1307) + foreach ($this->part_vals as $iter1324) { - $xfer += $output->writeString($iter1307); + $xfer += $output->writeString($iter1324); } } $output->writeListEnd(); @@ -37858,14 +38212,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1308 = 0; - $_etype1311 = 0; - $xfer += $input->readListBegin($_etype1311, $_size1308); - for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312) + $_size1325 = 0; + $_etype1328 = 0; + $xfer += $input->readListBegin($_etype1328, $_size1325); + for ($_i1329 = 0; $_i1329 < $_size1325; ++$_i1329) { - $elem1313 = null; - $xfer += $input->readString($elem1313); - $this->success []= $elem1313; + $elem1330 = null; + $xfer += $input->readString($elem1330); + $this->success []= $elem1330; } $xfer += $input->readListEnd(); } else { @@ -37901,9 +38255,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1314) + foreach ($this->success as $iter1331) { - $xfer += $output->writeString($iter1314); + $xfer += $output->writeString($iter1331); } } $output->writeListEnd(); @@ -38063,17 +38417,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1315 = 0; - $_ktype1316 = 0; - $_vtype1317 = 0; - $xfer += $input->readMapBegin($_ktype1316, $_vtype1317, $_size1315); - for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319) + $_size1332 = 0; + $_ktype1333 = 0; + $_vtype1334 = 0; + $xfer += $input->readMapBegin($_ktype1333, $_vtype1334, $_size1332); + for ($_i1336 = 0; $_i1336 < $_size1332; ++$_i1336) { - $key1320 = ''; - $val1321 = ''; - $xfer += $input->readString($key1320); - $xfer += $input->readString($val1321); - $this->success[$key1320] = $val1321; + $key1337 = ''; + $val1338 = ''; + $xfer += $input->readString($key1337); + $xfer += $input->readString($val1338); + $this->success[$key1337] = $val1338; } $xfer += $input->readMapEnd(); } else { @@ -38109,10 +38463,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1322 => $viter1323) + foreach ($this->success as $kiter1339 => $viter1340) { - $xfer += 
$output->writeString($kiter1322); - $xfer += $output->writeString($viter1323); + $xfer += $output->writeString($kiter1339); + $xfer += $output->writeString($viter1340); } } $output->writeMapEnd(); @@ -38232,17 +38586,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1324 = 0; - $_ktype1325 = 0; - $_vtype1326 = 0; - $xfer += $input->readMapBegin($_ktype1325, $_vtype1326, $_size1324); - for ($_i1328 = 0; $_i1328 < $_size1324; ++$_i1328) + $_size1341 = 0; + $_ktype1342 = 0; + $_vtype1343 = 0; + $xfer += $input->readMapBegin($_ktype1342, $_vtype1343, $_size1341); + for ($_i1345 = 0; $_i1345 < $_size1341; ++$_i1345) { - $key1329 = ''; - $val1330 = ''; - $xfer += $input->readString($key1329); - $xfer += $input->readString($val1330); - $this->part_vals[$key1329] = $val1330; + $key1346 = ''; + $val1347 = ''; + $xfer += $input->readString($key1346); + $xfer += $input->readString($val1347); + $this->part_vals[$key1346] = $val1347; } $xfer += $input->readMapEnd(); } else { @@ -38287,10 +38641,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1331 => $viter1332) + foreach ($this->part_vals as $kiter1348 => $viter1349) { - $xfer += $output->writeString($kiter1331); - $xfer += $output->writeString($viter1332); + $xfer += $output->writeString($kiter1348); + $xfer += $output->writeString($viter1349); } } $output->writeMapEnd(); @@ -38612,17 +38966,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1333 = 0; - $_ktype1334 = 0; - $_vtype1335 = 0; - $xfer += $input->readMapBegin($_ktype1334, $_vtype1335, $_size1333); - for ($_i1337 = 0; $_i1337 < $_size1333; ++$_i1337) + $_size1350 = 0; + $_ktype1351 = 0; + $_vtype1352 = 0; + $xfer += $input->readMapBegin($_ktype1351, $_vtype1352, $_size1350); + for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354) { - $key1338 = ''; - $val1339 = ''; - $xfer += $input->readString($key1338); - $xfer += $input->readString($val1339); - $this->part_vals[$key1338] = $val1339; + $key1355 = ''; + $val1356 = ''; + $xfer += $input->readString($key1355); + $xfer += $input->readString($val1356); + $this->part_vals[$key1355] = $val1356; } $xfer += $input->readMapEnd(); } else { @@ -38667,10 +39021,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1340 => $viter1341) + foreach ($this->part_vals as $kiter1357 => $viter1358) { - $xfer += $output->writeString($kiter1340); - $xfer += $output->writeString($viter1341); + $xfer += $output->writeString($kiter1357); + $xfer += $output->writeString($viter1358); } } $output->writeMapEnd(); @@ -44149,14 +44503,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1342 = 0; - $_etype1345 = 0; - $xfer += $input->readListBegin($_etype1345, $_size1342); - for ($_i1346 = 0; $_i1346 < $_size1342; ++$_i1346) + $_size1359 = 0; + $_etype1362 = 0; + $xfer += $input->readListBegin($_etype1362, $_size1359); + for ($_i1363 = 0; $_i1363 < $_size1359; ++$_i1363) { - $elem1347 = null; - $xfer += $input->readString($elem1347); - $this->success []= $elem1347; + $elem1364 = null; + $xfer += $input->readString($elem1364); + $this->success []= $elem1364; } $xfer += 
$input->readListEnd(); } else { @@ -44192,9 +44546,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1348) + foreach ($this->success as $iter1365) { - $xfer += $output->writeString($iter1348); + $xfer += $output->writeString($iter1365); } } $output->writeListEnd(); @@ -45063,14 +45417,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1349 = 0; - $_etype1352 = 0; - $xfer += $input->readListBegin($_etype1352, $_size1349); - for ($_i1353 = 0; $_i1353 < $_size1349; ++$_i1353) + $_size1366 = 0; + $_etype1369 = 0; + $xfer += $input->readListBegin($_etype1369, $_size1366); + for ($_i1370 = 0; $_i1370 < $_size1366; ++$_i1370) { - $elem1354 = null; - $xfer += $input->readString($elem1354); - $this->success []= $elem1354; + $elem1371 = null; + $xfer += $input->readString($elem1371); + $this->success []= $elem1371; } $xfer += $input->readListEnd(); } else { @@ -45106,9 +45460,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1355) + foreach ($this->success as $iter1372) { - $xfer += $output->writeString($iter1355); + $xfer += $output->writeString($iter1372); } } $output->writeListEnd(); @@ -45799,15 +46153,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1356 = 0; - $_etype1359 = 0; - $xfer += $input->readListBegin($_etype1359, $_size1356); - for ($_i1360 = 0; $_i1360 < $_size1356; ++$_i1360) + $_size1373 = 0; + $_etype1376 = 0; + $xfer += $input->readListBegin($_etype1376, $_size1373); + for ($_i1377 = 0; $_i1377 < $_size1373; ++$_i1377) { - $elem1361 = null; - $elem1361 = new \metastore\Role(); - $xfer += $elem1361->read($input); - $this->success []= $elem1361; + $elem1378 = null; + $elem1378 = new \metastore\Role(); + $xfer += $elem1378->read($input); + $this->success []= $elem1378; } $xfer += $input->readListEnd(); } else { @@ -45843,9 +46197,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1362) + foreach ($this->success as $iter1379) { - $xfer += $iter1362->write($output); + $xfer += $iter1379->write($output); } } $output->writeListEnd(); @@ -46507,14 +46861,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1363 = 0; - $_etype1366 = 0; - $xfer += $input->readListBegin($_etype1366, $_size1363); - for ($_i1367 = 0; $_i1367 < $_size1363; ++$_i1367) + $_size1380 = 0; + $_etype1383 = 0; + $xfer += $input->readListBegin($_etype1383, $_size1380); + for ($_i1384 = 0; $_i1384 < $_size1380; ++$_i1384) { - $elem1368 = null; - $xfer += $input->readString($elem1368); - $this->group_names []= $elem1368; + $elem1385 = null; + $xfer += $input->readString($elem1385); + $this->group_names []= $elem1385; } $xfer += $input->readListEnd(); } else { @@ -46555,9 +46909,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1369) + foreach ($this->group_names as $iter1386) { - $xfer += $output->writeString($iter1369); + $xfer += $output->writeString($iter1386); } } $output->writeListEnd(); @@ -46865,15 +47219,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if 
($ftype == TType::LST) { $this->success = array(); - $_size1370 = 0; - $_etype1373 = 0; - $xfer += $input->readListBegin($_etype1373, $_size1370); - for ($_i1374 = 0; $_i1374 < $_size1370; ++$_i1374) + $_size1387 = 0; + $_etype1390 = 0; + $xfer += $input->readListBegin($_etype1390, $_size1387); + for ($_i1391 = 0; $_i1391 < $_size1387; ++$_i1391) { - $elem1375 = null; - $elem1375 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1375->read($input); - $this->success []= $elem1375; + $elem1392 = null; + $elem1392 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1392->read($input); + $this->success []= $elem1392; } $xfer += $input->readListEnd(); } else { @@ -46909,9 +47263,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1376) + foreach ($this->success as $iter1393) { - $xfer += $iter1376->write($output); + $xfer += $iter1393->write($output); } } $output->writeListEnd(); @@ -47779,14 +48133,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1377 = 0; - $_etype1380 = 0; - $xfer += $input->readListBegin($_etype1380, $_size1377); - for ($_i1381 = 0; $_i1381 < $_size1377; ++$_i1381) + $_size1394 = 0; + $_etype1397 = 0; + $xfer += $input->readListBegin($_etype1397, $_size1394); + for ($_i1398 = 0; $_i1398 < $_size1394; ++$_i1398) { - $elem1382 = null; - $xfer += $input->readString($elem1382); - $this->group_names []= $elem1382; + $elem1399 = null; + $xfer += $input->readString($elem1399); + $this->group_names []= $elem1399; } $xfer += $input->readListEnd(); } else { @@ -47819,9 +48173,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1383) + foreach ($this->group_names as $iter1400) { - $xfer += $output->writeString($iter1383); + $xfer += $output->writeString($iter1400); } } $output->writeListEnd(); @@ -47897,14 +48251,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1384 = 0; - $_etype1387 = 0; - $xfer += $input->readListBegin($_etype1387, $_size1384); - for ($_i1388 = 0; $_i1388 < $_size1384; ++$_i1388) + $_size1401 = 0; + $_etype1404 = 0; + $xfer += $input->readListBegin($_etype1404, $_size1401); + for ($_i1405 = 0; $_i1405 < $_size1401; ++$_i1405) { - $elem1389 = null; - $xfer += $input->readString($elem1389); - $this->success []= $elem1389; + $elem1406 = null; + $xfer += $input->readString($elem1406); + $this->success []= $elem1406; } $xfer += $input->readListEnd(); } else { @@ -47940,9 +48294,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1390) + foreach ($this->success as $iter1407) { - $xfer += $output->writeString($iter1390); + $xfer += $output->writeString($iter1407); } } $output->writeListEnd(); @@ -49059,14 +49413,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1391 = 0; - $_etype1394 = 0; - $xfer += $input->readListBegin($_etype1394, $_size1391); - for ($_i1395 = 0; $_i1395 < $_size1391; ++$_i1395) + $_size1408 = 0; + $_etype1411 = 0; + $xfer += $input->readListBegin($_etype1411, $_size1408); + for ($_i1412 = 0; $_i1412 < $_size1408; ++$_i1412) { - $elem1396 = null; - $xfer += $input->readString($elem1396); - $this->success []= 
$elem1396; + $elem1413 = null; + $xfer += $input->readString($elem1413); + $this->success []= $elem1413; } $xfer += $input->readListEnd(); } else { @@ -49094,9 +49448,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1397) + foreach ($this->success as $iter1414) { - $xfer += $output->writeString($iter1397); + $xfer += $output->writeString($iter1414); } } $output->writeListEnd(); @@ -49735,14 +50089,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1398 = 0; - $_etype1401 = 0; - $xfer += $input->readListBegin($_etype1401, $_size1398); - for ($_i1402 = 0; $_i1402 < $_size1398; ++$_i1402) + $_size1415 = 0; + $_etype1418 = 0; + $xfer += $input->readListBegin($_etype1418, $_size1415); + for ($_i1419 = 0; $_i1419 < $_size1415; ++$_i1419) { - $elem1403 = null; - $xfer += $input->readString($elem1403); - $this->success []= $elem1403; + $elem1420 = null; + $xfer += $input->readString($elem1420); + $this->success []= $elem1420; } $xfer += $input->readListEnd(); } else { @@ -49770,9 +50124,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1404) + foreach ($this->success as $iter1421) { - $xfer += $output->writeString($iter1404); + $xfer += $output->writeString($iter1421); } } $output->writeListEnd(); @@ -53526,14 +53880,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1405 = 0; - $_etype1408 = 0; - $xfer += $input->readListBegin($_etype1408, $_size1405); - for ($_i1409 = 0; $_i1409 < $_size1405; ++$_i1409) + $_size1422 = 0; + $_etype1425 = 0; + $xfer += $input->readListBegin($_etype1425, $_size1422); + for ($_i1426 = 0; $_i1426 < $_size1422; ++$_i1426) { - $elem1410 = null; - $xfer += $input->readString($elem1410); - $this->success []= $elem1410; + $elem1427 = null; + $xfer += $input->readString($elem1427); + $this->success []= $elem1427; } $xfer += $input->readListEnd(); } else { @@ -53561,9 +53915,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1411) + foreach ($this->success as $iter1428) { - $xfer += $output->writeString($iter1411); + $xfer += $output->writeString($iter1428); } } $output->writeListEnd(); @@ -61734,15 +62088,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1412 = 0; - $_etype1415 = 0; - $xfer += $input->readListBegin($_etype1415, $_size1412); - for ($_i1416 = 0; $_i1416 < $_size1412; ++$_i1416) + $_size1429 = 0; + $_etype1432 = 0; + $xfer += $input->readListBegin($_etype1432, $_size1429); + for ($_i1433 = 0; $_i1433 < $_size1429; ++$_i1433) { - $elem1417 = null; - $elem1417 = new \metastore\SchemaVersion(); - $xfer += $elem1417->read($input); - $this->success []= $elem1417; + $elem1434 = null; + $elem1434 = new \metastore\SchemaVersion(); + $xfer += $elem1434->read($input); + $this->success []= $elem1434; } $xfer += $input->readListEnd(); } else { @@ -61786,9 +62140,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1418) + foreach ($this->success as $iter1435) { - $xfer += 
$iter1418->write($output); + $xfer += $iter1435->write($output); } } $output->writeListEnd(); @@ -63657,15 +64011,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1419 = 0; - $_etype1422 = 0; - $xfer += $input->readListBegin($_etype1422, $_size1419); - for ($_i1423 = 0; $_i1423 < $_size1419; ++$_i1423) + $_size1436 = 0; + $_etype1439 = 0; + $xfer += $input->readListBegin($_etype1439, $_size1436); + for ($_i1440 = 0; $_i1440 < $_size1436; ++$_i1440) { - $elem1424 = null; - $elem1424 = new \metastore\RuntimeStat(); - $xfer += $elem1424->read($input); - $this->success []= $elem1424; + $elem1441 = null; + $elem1441 = new \metastore\RuntimeStat(); + $xfer += $elem1441->read($input); + $this->success []= $elem1441; } $xfer += $input->readListEnd(); } else { @@ -63701,9 +64055,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1425) + foreach ($this->success as $iter1442) { - $xfer += $iter1425->write($output); + $xfer += $iter1442->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index fb2747c8c3..2234346cbe 100755 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -91,6 +91,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)') print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)') print(' Partition get_partition(string db_name, string tbl_name, part_vals)') + print(' bool update_last_access_time( partitionsMap, i32 last_accesstime)') print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)') @@ -719,6 +720,12 @@ elif cmd == 'get_partition': sys.exit(1) pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),)) +elif cmd == 'update_last_access_time': + if len(args) != 2: + print('update_last_access_time requires 2 args') + sys.exit(1) + pp.pprint(client.update_last_access_time(eval(args[0]),eval(args[1]),)) + elif cmd == 'exchange_partition': if len(args) != 5: print('exchange_partition requires 5 args') diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 2be349fbad..c6a378ac84 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -552,6 +552,14 @@ def get_partition(self, db_name, tbl_name, part_vals): """ pass + def update_last_access_time(self, partitionsMap, last_accesstime): + """ + Parameters: + - 
partitionsMap + - last_accesstime + """ + pass + def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): """ Parameters: @@ -4180,6 +4188,43 @@ def recv_get_partition(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result") + def update_last_access_time(self, partitionsMap, last_accesstime): + """ + Parameters: + - partitionsMap + - last_accesstime + """ + self.send_update_last_access_time(partitionsMap, last_accesstime) + return self.recv_update_last_access_time() + + def send_update_last_access_time(self, partitionsMap, last_accesstime): + self._oprot.writeMessageBegin('update_last_access_time', TMessageType.CALL, self._seqid) + args = update_last_access_time_args() + args.partitionsMap = partitionsMap + args.last_accesstime = last_accesstime + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_last_access_time(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_last_access_time_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "update_last_access_time failed: unknown result") + def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): """ Parameters: @@ -9751,6 +9796,7 @@ def __init__(self, handler): self._processMap["drop_partition_by_name_with_environment_context"] = Processor.process_drop_partition_by_name_with_environment_context self._processMap["drop_partitions_req"] = Processor.process_drop_partitions_req self._processMap["get_partition"] = Processor.process_get_partition + self._processMap["update_last_access_time"] = Processor.process_update_last_access_time self._processMap["exchange_partition"] = Processor.process_exchange_partition self._processMap["exchange_partitions"] = Processor.process_exchange_partitions self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth @@ -11638,6 +11684,31 @@ def process_get_partition(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_update_last_access_time(self, seqid, iprot, oprot): + args = update_last_access_time_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_last_access_time_result() + try: + result.success = self._handler.update_last_access_time(args.partitionsMap, args.last_accesstime) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("update_last_access_time", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_exchange_partition(self, seqid, iprot, oprot): args = exchange_partition_args() args.read(iprot) @@ -27045,6 +27116,194 @@ def __eq__(self, other): def __ne__(self, other): 
return not (self == other) +class update_last_access_time_args: + """ + Attributes: + - partitionsMap + - last_accesstime + """ + + thrift_spec = ( + None, # 0 + (1, TType.MAP, 'partitionsMap', (TType.STRING,None,TType.SET,(TType.STRING,None)), None, ), # 1 + (2, TType.I32, 'last_accesstime', None, None, ), # 2 + ) + + def __init__(self, partitionsMap=None, last_accesstime=None,): + self.partitionsMap = partitionsMap + self.last_accesstime = last_accesstime + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.partitionsMap = {} + (_ktype1127, _vtype1128, _size1126 ) = iprot.readMapBegin() + for _i1130 in xrange(_size1126): + _key1131 = iprot.readString() + _val1132 = set() + (_etype1136, _size1133) = iprot.readSetBegin() + for _i1137 in xrange(_size1133): + _elem1138 = iprot.readString() + _val1132.add(_elem1138) + iprot.readSetEnd() + self.partitionsMap[_key1131] = _val1132 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.last_accesstime = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('update_last_access_time_args') + if self.partitionsMap is not None: + oprot.writeFieldBegin('partitionsMap', TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.SET, len(self.partitionsMap)) + for kiter1139,viter1140 in self.partitionsMap.items(): + oprot.writeString(kiter1139) + oprot.writeSetBegin(TType.STRING, len(viter1140)) + for iter1141 in viter1140: + oprot.writeString(iter1141) + oprot.writeSetEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.last_accesstime is not None: + oprot.writeFieldBegin('last_accesstime', TType.I32, 2) + oprot.writeI32(self.last_accesstime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.partitionsMap) + value = (value * 31) ^ hash(self.last_accesstime) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class update_last_access_time_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('update_last_access_time_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class exchange_partition_args: """ Attributes: @@ -27083,11 +27342,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1127, _vtype1128, _size1126 ) = iprot.readMapBegin() - for _i1130 in xrange(_size1126): - _key1131 = iprot.readString() - _val1132 = iprot.readString() - self.partitionSpecs[_key1131] = _val1132 + (_ktype1143, _vtype1144, _size1142 ) = iprot.readMapBegin() + for _i1146 in xrange(_size1142): + _key1147 = iprot.readString() + _val1148 = iprot.readString() + self.partitionSpecs[_key1147] = _val1148 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27124,9 +27383,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1133,viter1134 in self.partitionSpecs.items(): - oprot.writeString(kiter1133) - oprot.writeString(viter1134) + for kiter1149,viter1150 in self.partitionSpecs.items(): + oprot.writeString(kiter1149) + oprot.writeString(viter1150) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -27331,11 +27590,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1136, _vtype1137, _size1135 ) = iprot.readMapBegin() - for _i1139 in xrange(_size1135): - _key1140 = iprot.readString() - _val1141 = iprot.readString() - self.partitionSpecs[_key1140] = _val1141 + (_ktype1152, 
_vtype1153, _size1151 ) = iprot.readMapBegin() + for _i1155 in xrange(_size1151): + _key1156 = iprot.readString() + _val1157 = iprot.readString() + self.partitionSpecs[_key1156] = _val1157 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27372,9 +27631,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1142,viter1143 in self.partitionSpecs.items(): - oprot.writeString(kiter1142) - oprot.writeString(viter1143) + for kiter1158,viter1159 in self.partitionSpecs.items(): + oprot.writeString(kiter1158) + oprot.writeString(viter1159) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -27457,11 +27716,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1147, _size1144) = iprot.readListBegin() - for _i1148 in xrange(_size1144): - _elem1149 = Partition() - _elem1149.read(iprot) - self.success.append(_elem1149) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = Partition() + _elem1165.read(iprot) + self.success.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -27502,8 +27761,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1150 in self.success: - iter1150.write(oprot) + for iter1166 in self.success: + iter1166.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27597,10 +27856,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1154, _size1151) = iprot.readListBegin() - for _i1155 in xrange(_size1151): - _elem1156 = iprot.readString() - self.part_vals.append(_elem1156) + (_etype1170, _size1167) = iprot.readListBegin() + for _i1171 in xrange(_size1167): + _elem1172 = iprot.readString() + self.part_vals.append(_elem1172) iprot.readListEnd() else: iprot.skip(ftype) @@ -27612,10 +27871,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1160, _size1157) = iprot.readListBegin() - for _i1161 in xrange(_size1157): - _elem1162 = iprot.readString() - self.group_names.append(_elem1162) + (_etype1176, _size1173) = iprot.readListBegin() + for _i1177 in xrange(_size1173): + _elem1178 = iprot.readString() + self.group_names.append(_elem1178) iprot.readListEnd() else: iprot.skip(ftype) @@ -27640,8 +27899,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1163 in self.part_vals: - oprot.writeString(iter1163) + for iter1179 in self.part_vals: + oprot.writeString(iter1179) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -27651,8 +27910,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1164 in self.group_names: - oprot.writeString(iter1164) + for iter1180 in self.group_names: + oprot.writeString(iter1180) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28081,11 +28340,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1168, _size1165) = iprot.readListBegin() - for _i1169 in xrange(_size1165): - _elem1170 = Partition() - _elem1170.read(iprot) - 
self.success.append(_elem1170) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = Partition() + _elem1186.read(iprot) + self.success.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -28114,8 +28373,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1171 in self.success: - iter1171.write(oprot) + for iter1187 in self.success: + iter1187.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28209,10 +28468,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1175, _size1172) = iprot.readListBegin() - for _i1176 in xrange(_size1172): - _elem1177 = iprot.readString() - self.group_names.append(_elem1177) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = iprot.readString() + self.group_names.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) @@ -28245,8 +28504,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1178 in self.group_names: - oprot.writeString(iter1178) + for iter1194 in self.group_names: + oprot.writeString(iter1194) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28307,11 +28566,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1182, _size1179) = iprot.readListBegin() - for _i1183 in xrange(_size1179): - _elem1184 = Partition() - _elem1184.read(iprot) - self.success.append(_elem1184) + (_etype1198, _size1195) = iprot.readListBegin() + for _i1199 in xrange(_size1195): + _elem1200 = Partition() + _elem1200.read(iprot) + self.success.append(_elem1200) iprot.readListEnd() else: iprot.skip(ftype) @@ -28340,8 +28599,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1185 in self.success: - iter1185.write(oprot) + for iter1201 in self.success: + iter1201.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28499,11 +28758,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1189, _size1186) = iprot.readListBegin() - for _i1190 in xrange(_size1186): - _elem1191 = PartitionSpec() - _elem1191.read(iprot) - self.success.append(_elem1191) + (_etype1205, _size1202) = iprot.readListBegin() + for _i1206 in xrange(_size1202): + _elem1207 = PartitionSpec() + _elem1207.read(iprot) + self.success.append(_elem1207) iprot.readListEnd() else: iprot.skip(ftype) @@ -28532,8 +28791,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1192 in self.success: - iter1192.write(oprot) + for iter1208 in self.success: + iter1208.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28691,10 +28950,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1196, _size1193) = iprot.readListBegin() - for _i1197 in xrange(_size1193): - _elem1198 = iprot.readString() - self.success.append(_elem1198) + (_etype1212, _size1209) = iprot.readListBegin() + for _i1213 in xrange(_size1209): + _elem1214 = iprot.readString() + self.success.append(_elem1214) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -28723,8 +28982,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1199 in self.success: - oprot.writeString(iter1199) + for iter1215 in self.success: + oprot.writeString(iter1215) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28964,10 +29223,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1203, _size1200) = iprot.readListBegin() - for _i1204 in xrange(_size1200): - _elem1205 = iprot.readString() - self.part_vals.append(_elem1205) + (_etype1219, _size1216) = iprot.readListBegin() + for _i1220 in xrange(_size1216): + _elem1221 = iprot.readString() + self.part_vals.append(_elem1221) iprot.readListEnd() else: iprot.skip(ftype) @@ -28997,8 +29256,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1206 in self.part_vals: - oprot.writeString(iter1206) + for iter1222 in self.part_vals: + oprot.writeString(iter1222) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -29062,11 +29321,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1210, _size1207) = iprot.readListBegin() - for _i1211 in xrange(_size1207): - _elem1212 = Partition() - _elem1212.read(iprot) - self.success.append(_elem1212) + (_etype1226, _size1223) = iprot.readListBegin() + for _i1227 in xrange(_size1223): + _elem1228 = Partition() + _elem1228.read(iprot) + self.success.append(_elem1228) iprot.readListEnd() else: iprot.skip(ftype) @@ -29095,8 +29354,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1213 in self.success: - iter1213.write(oprot) + for iter1229 in self.success: + iter1229.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29183,10 +29442,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1217, _size1214) = iprot.readListBegin() - for _i1218 in xrange(_size1214): - _elem1219 = iprot.readString() - self.part_vals.append(_elem1219) + (_etype1233, _size1230) = iprot.readListBegin() + for _i1234 in xrange(_size1230): + _elem1235 = iprot.readString() + self.part_vals.append(_elem1235) iprot.readListEnd() else: iprot.skip(ftype) @@ -29203,10 +29462,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1223, _size1220) = iprot.readListBegin() - for _i1224 in xrange(_size1220): - _elem1225 = iprot.readString() - self.group_names.append(_elem1225) + (_etype1239, _size1236) = iprot.readListBegin() + for _i1240 in xrange(_size1236): + _elem1241 = iprot.readString() + self.group_names.append(_elem1241) iprot.readListEnd() else: iprot.skip(ftype) @@ -29231,8 +29490,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1226 in self.part_vals: - oprot.writeString(iter1226) + for iter1242 in self.part_vals: + oprot.writeString(iter1242) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -29246,8 +29505,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) 
oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1227 in self.group_names: - oprot.writeString(iter1227) + for iter1243 in self.group_names: + oprot.writeString(iter1243) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29309,11 +29568,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1231, _size1228) = iprot.readListBegin() - for _i1232 in xrange(_size1228): - _elem1233 = Partition() - _elem1233.read(iprot) - self.success.append(_elem1233) + (_etype1247, _size1244) = iprot.readListBegin() + for _i1248 in xrange(_size1244): + _elem1249 = Partition() + _elem1249.read(iprot) + self.success.append(_elem1249) iprot.readListEnd() else: iprot.skip(ftype) @@ -29342,8 +29601,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1234 in self.success: - iter1234.write(oprot) + for iter1250 in self.success: + iter1250.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29424,10 +29683,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1238, _size1235) = iprot.readListBegin() - for _i1239 in xrange(_size1235): - _elem1240 = iprot.readString() - self.part_vals.append(_elem1240) + (_etype1254, _size1251) = iprot.readListBegin() + for _i1255 in xrange(_size1251): + _elem1256 = iprot.readString() + self.part_vals.append(_elem1256) iprot.readListEnd() else: iprot.skip(ftype) @@ -29457,8 +29716,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1241 in self.part_vals: - oprot.writeString(iter1241) + for iter1257 in self.part_vals: + oprot.writeString(iter1257) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -29522,10 +29781,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1245, _size1242) = iprot.readListBegin() - for _i1246 in xrange(_size1242): - _elem1247 = iprot.readString() - self.success.append(_elem1247) + (_etype1261, _size1258) = iprot.readListBegin() + for _i1262 in xrange(_size1258): + _elem1263 = iprot.readString() + self.success.append(_elem1263) iprot.readListEnd() else: iprot.skip(ftype) @@ -29554,8 +29813,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1248 in self.success: - oprot.writeString(iter1248) + for iter1264 in self.success: + oprot.writeString(iter1264) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29726,11 +29985,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1252, _size1249) = iprot.readListBegin() - for _i1253 in xrange(_size1249): - _elem1254 = Partition() - _elem1254.read(iprot) - self.success.append(_elem1254) + (_etype1268, _size1265) = iprot.readListBegin() + for _i1269 in xrange(_size1265): + _elem1270 = Partition() + _elem1270.read(iprot) + self.success.append(_elem1270) iprot.readListEnd() else: iprot.skip(ftype) @@ -29759,8 +30018,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1255 in self.success: - iter1255.write(oprot) + for iter1271 in self.success: + iter1271.write(oprot) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29931,11 +30190,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1259, _size1256) = iprot.readListBegin() - for _i1260 in xrange(_size1256): - _elem1261 = PartitionSpec() - _elem1261.read(iprot) - self.success.append(_elem1261) + (_etype1275, _size1272) = iprot.readListBegin() + for _i1276 in xrange(_size1272): + _elem1277 = PartitionSpec() + _elem1277.read(iprot) + self.success.append(_elem1277) iprot.readListEnd() else: iprot.skip(ftype) @@ -29964,8 +30223,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1262 in self.success: - iter1262.write(oprot) + for iter1278 in self.success: + iter1278.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30385,10 +30644,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1266, _size1263) = iprot.readListBegin() - for _i1267 in xrange(_size1263): - _elem1268 = iprot.readString() - self.names.append(_elem1268) + (_etype1282, _size1279) = iprot.readListBegin() + for _i1283 in xrange(_size1279): + _elem1284 = iprot.readString() + self.names.append(_elem1284) iprot.readListEnd() else: iprot.skip(ftype) @@ -30413,8 +30672,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1269 in self.names: - oprot.writeString(iter1269) + for iter1285 in self.names: + oprot.writeString(iter1285) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30473,11 +30732,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1273, _size1270) = iprot.readListBegin() - for _i1274 in xrange(_size1270): - _elem1275 = Partition() - _elem1275.read(iprot) - self.success.append(_elem1275) + (_etype1289, _size1286) = iprot.readListBegin() + for _i1290 in xrange(_size1286): + _elem1291 = Partition() + _elem1291.read(iprot) + self.success.append(_elem1291) iprot.readListEnd() else: iprot.skip(ftype) @@ -30506,8 +30765,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1276 in self.success: - iter1276.write(oprot) + for iter1292 in self.success: + iter1292.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30916,11 +31175,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1280, _size1277) = iprot.readListBegin() - for _i1281 in xrange(_size1277): - _elem1282 = Partition() - _elem1282.read(iprot) - self.new_parts.append(_elem1282) + (_etype1296, _size1293) = iprot.readListBegin() + for _i1297 in xrange(_size1293): + _elem1298 = Partition() + _elem1298.read(iprot) + self.new_parts.append(_elem1298) iprot.readListEnd() else: iprot.skip(ftype) @@ -30945,8 +31204,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1283 in self.new_parts: - iter1283.write(oprot) + for iter1299 in self.new_parts: + iter1299.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31099,11 +31358,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1287, 
_size1284) = iprot.readListBegin() - for _i1288 in xrange(_size1284): - _elem1289 = Partition() - _elem1289.read(iprot) - self.new_parts.append(_elem1289) + (_etype1303, _size1300) = iprot.readListBegin() + for _i1304 in xrange(_size1300): + _elem1305 = Partition() + _elem1305.read(iprot) + self.new_parts.append(_elem1305) iprot.readListEnd() else: iprot.skip(ftype) @@ -31134,8 +31393,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1290 in self.new_parts: - iter1290.write(oprot) + for iter1306 in self.new_parts: + iter1306.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -31638,10 +31897,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1294, _size1291) = iprot.readListBegin() - for _i1295 in xrange(_size1291): - _elem1296 = iprot.readString() - self.part_vals.append(_elem1296) + (_etype1310, _size1307) = iprot.readListBegin() + for _i1311 in xrange(_size1307): + _elem1312 = iprot.readString() + self.part_vals.append(_elem1312) iprot.readListEnd() else: iprot.skip(ftype) @@ -31672,8 +31931,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1297 in self.part_vals: - oprot.writeString(iter1297) + for iter1313 in self.part_vals: + oprot.writeString(iter1313) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -31974,10 +32233,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1301, _size1298) = iprot.readListBegin() - for _i1302 in xrange(_size1298): - _elem1303 = iprot.readString() - self.part_vals.append(_elem1303) + (_etype1317, _size1314) = iprot.readListBegin() + for _i1318 in xrange(_size1314): + _elem1319 = iprot.readString() + self.part_vals.append(_elem1319) iprot.readListEnd() else: iprot.skip(ftype) @@ -31999,8 +32258,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1304 in self.part_vals: - oprot.writeString(iter1304) + for iter1320 in self.part_vals: + oprot.writeString(iter1320) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -32358,10 +32617,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1308, _size1305) = iprot.readListBegin() - for _i1309 in xrange(_size1305): - _elem1310 = iprot.readString() - self.success.append(_elem1310) + (_etype1324, _size1321) = iprot.readListBegin() + for _i1325 in xrange(_size1321): + _elem1326 = iprot.readString() + self.success.append(_elem1326) iprot.readListEnd() else: iprot.skip(ftype) @@ -32384,8 +32643,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1311 in self.success: - oprot.writeString(iter1311) + for iter1327 in self.success: + oprot.writeString(iter1327) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32509,11 +32768,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1313, _vtype1314, _size1312 ) = iprot.readMapBegin() - for _i1316 in xrange(_size1312): - _key1317 = iprot.readString() - _val1318 = iprot.readString() - 
self.success[_key1317] = _val1318 + (_ktype1329, _vtype1330, _size1328 ) = iprot.readMapBegin() + for _i1332 in xrange(_size1328): + _key1333 = iprot.readString() + _val1334 = iprot.readString() + self.success[_key1333] = _val1334 iprot.readMapEnd() else: iprot.skip(ftype) @@ -32536,9 +32795,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1319,viter1320 in self.success.items(): - oprot.writeString(kiter1319) - oprot.writeString(viter1320) + for kiter1335,viter1336 in self.success.items(): + oprot.writeString(kiter1335) + oprot.writeString(viter1336) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32614,11 +32873,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1322, _vtype1323, _size1321 ) = iprot.readMapBegin() - for _i1325 in xrange(_size1321): - _key1326 = iprot.readString() - _val1327 = iprot.readString() - self.part_vals[_key1326] = _val1327 + (_ktype1338, _vtype1339, _size1337 ) = iprot.readMapBegin() + for _i1341 in xrange(_size1337): + _key1342 = iprot.readString() + _val1343 = iprot.readString() + self.part_vals[_key1342] = _val1343 iprot.readMapEnd() else: iprot.skip(ftype) @@ -32648,9 +32907,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1328,viter1329 in self.part_vals.items(): - oprot.writeString(kiter1328) - oprot.writeString(viter1329) + for kiter1344,viter1345 in self.part_vals.items(): + oprot.writeString(kiter1344) + oprot.writeString(viter1345) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -32864,11 +33123,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1331, _vtype1332, _size1330 ) = iprot.readMapBegin() - for _i1334 in xrange(_size1330): - _key1335 = iprot.readString() - _val1336 = iprot.readString() - self.part_vals[_key1335] = _val1336 + (_ktype1347, _vtype1348, _size1346 ) = iprot.readMapBegin() + for _i1350 in xrange(_size1346): + _key1351 = iprot.readString() + _val1352 = iprot.readString() + self.part_vals[_key1351] = _val1352 iprot.readMapEnd() else: iprot.skip(ftype) @@ -32898,9 +33157,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1337,viter1338 in self.part_vals.items(): - oprot.writeString(kiter1337) - oprot.writeString(viter1338) + for kiter1353,viter1354 in self.part_vals.items(): + oprot.writeString(kiter1353) + oprot.writeString(viter1354) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -36926,10 +37185,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1342, _size1339) = iprot.readListBegin() - for _i1343 in xrange(_size1339): - _elem1344 = iprot.readString() - self.success.append(_elem1344) + (_etype1358, _size1355) = iprot.readListBegin() + for _i1359 in xrange(_size1355): + _elem1360 = iprot.readString() + self.success.append(_elem1360) iprot.readListEnd() else: iprot.skip(ftype) @@ -36952,8 +37211,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1345 in self.success: - oprot.writeString(iter1345) + for 
iter1361 in self.success: + oprot.writeString(iter1361) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37641,10 +37900,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1349, _size1346) = iprot.readListBegin() - for _i1350 in xrange(_size1346): - _elem1351 = iprot.readString() - self.success.append(_elem1351) + (_etype1365, _size1362) = iprot.readListBegin() + for _i1366 in xrange(_size1362): + _elem1367 = iprot.readString() + self.success.append(_elem1367) iprot.readListEnd() else: iprot.skip(ftype) @@ -37667,8 +37926,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1352 in self.success: - oprot.writeString(iter1352) + for iter1368 in self.success: + oprot.writeString(iter1368) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38182,11 +38441,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1356, _size1353) = iprot.readListBegin() - for _i1357 in xrange(_size1353): - _elem1358 = Role() - _elem1358.read(iprot) - self.success.append(_elem1358) + (_etype1372, _size1369) = iprot.readListBegin() + for _i1373 in xrange(_size1369): + _elem1374 = Role() + _elem1374.read(iprot) + self.success.append(_elem1374) iprot.readListEnd() else: iprot.skip(ftype) @@ -38209,8 +38468,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1359 in self.success: - iter1359.write(oprot) + for iter1375 in self.success: + iter1375.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38719,10 +38978,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1363, _size1360) = iprot.readListBegin() - for _i1364 in xrange(_size1360): - _elem1365 = iprot.readString() - self.group_names.append(_elem1365) + (_etype1379, _size1376) = iprot.readListBegin() + for _i1380 in xrange(_size1376): + _elem1381 = iprot.readString() + self.group_names.append(_elem1381) iprot.readListEnd() else: iprot.skip(ftype) @@ -38747,8 +39006,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1366 in self.group_names: - oprot.writeString(iter1366) + for iter1382 in self.group_names: + oprot.writeString(iter1382) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38975,11 +39234,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1370, _size1367) = iprot.readListBegin() - for _i1371 in xrange(_size1367): - _elem1372 = HiveObjectPrivilege() - _elem1372.read(iprot) - self.success.append(_elem1372) + (_etype1386, _size1383) = iprot.readListBegin() + for _i1387 in xrange(_size1383): + _elem1388 = HiveObjectPrivilege() + _elem1388.read(iprot) + self.success.append(_elem1388) iprot.readListEnd() else: iprot.skip(ftype) @@ -39002,8 +39261,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1373 in self.success: - iter1373.write(oprot) + for iter1389 in self.success: + iter1389.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39673,10 +39932,10 @@ def read(self, iprot): elif fid == 2: 
if ftype == TType.LIST: self.group_names = [] - (_etype1377, _size1374) = iprot.readListBegin() - for _i1378 in xrange(_size1374): - _elem1379 = iprot.readString() - self.group_names.append(_elem1379) + (_etype1393, _size1390) = iprot.readListBegin() + for _i1394 in xrange(_size1390): + _elem1395 = iprot.readString() + self.group_names.append(_elem1395) iprot.readListEnd() else: iprot.skip(ftype) @@ -39697,8 +39956,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1380 in self.group_names: - oprot.writeString(iter1380) + for iter1396 in self.group_names: + oprot.writeString(iter1396) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -39753,10 +40012,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1384, _size1381) = iprot.readListBegin() - for _i1385 in xrange(_size1381): - _elem1386 = iprot.readString() - self.success.append(_elem1386) + (_etype1400, _size1397) = iprot.readListBegin() + for _i1401 in xrange(_size1397): + _elem1402 = iprot.readString() + self.success.append(_elem1402) iprot.readListEnd() else: iprot.skip(ftype) @@ -39779,8 +40038,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1387 in self.success: - oprot.writeString(iter1387) + for iter1403 in self.success: + oprot.writeString(iter1403) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -40712,10 +40971,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1391, _size1388) = iprot.readListBegin() - for _i1392 in xrange(_size1388): - _elem1393 = iprot.readString() - self.success.append(_elem1393) + (_etype1407, _size1404) = iprot.readListBegin() + for _i1408 in xrange(_size1404): + _elem1409 = iprot.readString() + self.success.append(_elem1409) iprot.readListEnd() else: iprot.skip(ftype) @@ -40732,8 +40991,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1394 in self.success: - oprot.writeString(iter1394) + for iter1410 in self.success: + oprot.writeString(iter1410) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -41260,10 +41519,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1398, _size1395) = iprot.readListBegin() - for _i1399 in xrange(_size1395): - _elem1400 = iprot.readString() - self.success.append(_elem1400) + (_etype1414, _size1411) = iprot.readListBegin() + for _i1415 in xrange(_size1411): + _elem1416 = iprot.readString() + self.success.append(_elem1416) iprot.readListEnd() else: iprot.skip(ftype) @@ -41280,8 +41539,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1401 in self.success: - oprot.writeString(iter1401) + for iter1417 in self.success: + oprot.writeString(iter1417) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -44294,10 +44553,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1405, _size1402) = iprot.readListBegin() - for _i1406 in xrange(_size1402): - _elem1407 = iprot.readString() - self.success.append(_elem1407) + (_etype1421, _size1418) = iprot.readListBegin() + 
for _i1422 in xrange(_size1418): + _elem1423 = iprot.readString() + self.success.append(_elem1423) iprot.readListEnd() else: iprot.skip(ftype) @@ -44314,8 +44573,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1408 in self.success: - oprot.writeString(iter1408) + for iter1424 in self.success: + oprot.writeString(iter1424) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -50625,11 +50884,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1412, _size1409) = iprot.readListBegin() - for _i1413 in xrange(_size1409): - _elem1414 = SchemaVersion() - _elem1414.read(iprot) - self.success.append(_elem1414) + (_etype1428, _size1425) = iprot.readListBegin() + for _i1429 in xrange(_size1425): + _elem1430 = SchemaVersion() + _elem1430.read(iprot) + self.success.append(_elem1430) iprot.readListEnd() else: iprot.skip(ftype) @@ -50658,8 +50917,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1415 in self.success: - iter1415.write(oprot) + for iter1431 in self.success: + iter1431.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -52134,11 +52393,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1419, _size1416) = iprot.readListBegin() - for _i1420 in xrange(_size1416): - _elem1421 = RuntimeStat() - _elem1421.read(iprot) - self.success.append(_elem1421) + (_etype1435, _size1432) = iprot.readListBegin() + for _i1436 in xrange(_size1432): + _elem1437 = RuntimeStat() + _elem1437.read(iprot) + self.success.append(_elem1437) iprot.readListEnd() else: iprot.skip(ftype) @@ -52161,8 +52420,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1422 in self.success: - iter1422.write(oprot) + for iter1438 in self.success: + iter1438.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 75d4de20bd..8fc27bbecb 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1140,6 +1140,23 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition failed: unknown result') end + def update_last_access_time(partitionsMap, last_accesstime) + send_update_last_access_time(partitionsMap, last_accesstime) + return recv_update_last_access_time() + end + + def send_update_last_access_time(partitionsMap, last_accesstime) + send_message('update_last_access_time', Update_last_access_time_args, :partitionsMap => partitionsMap, :last_accesstime => last_accesstime) + end + + def recv_update_last_access_time() + result = receive_message(Update_last_access_time_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_last_access_time failed: unknown result') + end + def exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) return recv_exchange_partition() @@ -4629,6 +4646,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_partition', seqid) end + def process_update_last_access_time(seqid, iprot, oprot) + args = read_args(iprot, Update_last_access_time_args) + result = Update_last_access_time_result.new() + begin + result.success = @handler.update_last_access_time(args.partitionsMap, args.last_accesstime) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'update_last_access_time', seqid) + end + def process_exchange_partition(seqid, iprot, oprot) args = read_args(iprot, Exchange_partition_args) result = Exchange_partition_result.new() @@ -9051,6 +9081,44 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Update_last_access_time_args + include ::Thrift::Struct, ::Thrift::Struct_Union + PARTITIONSMAP = 1 + LAST_ACCESSTIME = 2 + + FIELDS = { + PARTITIONSMAP => {:type => ::Thrift::Types::MAP, :name => 'partitionsMap', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::SET, :element => {:type => ::Thrift::Types::STRING}}}, + LAST_ACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'last_accesstime'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Update_last_access_time_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Exchange_partition_args include ::Thrift::Struct, ::Thrift::Struct_Union PARTITIONSPECS = 1 diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c55a6801a9..4893487f8c 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -47,6 +47,7 @@ import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -2308,6 +2309,13 @@ public void alterDatabase(String catName, String dbName, Database newDb) throws return client.get_check_constraints(req).getCheckConstraints(); } + /** {@inheritDoc} */ + @Override + public boolean updateLastAccessTime(Map> partsMap, int lastAccessTime) + throws NoSuchObjectException, MetaException, TException { + return client.update_last_access_time(partsMap, lastAccessTime); + } + /** {@inheritDoc} */ @Override public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws 
TException { diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 8999d55905..12a955541e 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -2421,6 +2422,17 @@ String getConfigValue(String name, String defaultValue) Map<String, String> partitionNameToSpec(String name) throws MetaException, TException; + /** + * Update the lastAccessTime timestamp on a set of partitions. + * @param partsMap a map keyed by db.tblname, each entry holding the set of partition names to update for that table + * @param lastAccessTime the timestamp to set, in seconds + * @return boolean indicating the status of the operation + * @throws NoSuchObjectException + * @throws MetaException + */ + boolean updateLastAccessTime(Map<String, Set<String>> partsMap, int lastAccessTime) + throws NoSuchObjectException, MetaException, TException; + /** * Write table level column statistics to persistent store * @param statsObj diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 9dd2580b66..48f2252c45 100644 --- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -2047,6 +2047,10 @@ service ThriftHiveMetastore extends fb303.FacebookService Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + bool update_last_access_time(1:map<string,set<string>> partitionsMap, 2:i32 last_accesstime) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db, 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 1a694fb980..2b60cea6c0 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1035,6 +1035,11 @@ private String startFunction(String function) { return startFunction(function, ""); } + private void startUpdateAccessTime(String function, String extraLogInfo) { + LOG.info((getThreadLocalIpAddress() == null ?
"" : "source:" + getThreadLocalIpAddress() + " ") + + function + extraLogInfo); + } + private void startTableFunction(String function, String catName, String db, String tbl) { startFunction(function, " : tbl=" + TableName.getQualified(catName, db, tbl)); @@ -4726,6 +4731,23 @@ public Partition get_partition(final String db_name, final String tbl_name, return ret; } + @Override + public boolean update_last_access_time(final Map> partitionsMap, final int lastAccessTime) + throws MetaException, NoSuchObjectException { + startUpdateAccessTime("update_last_access_time", " No of tables to be updated:" + partitionsMap.size()); + try { + return getMS().updateLastAccessTime(partitionsMap, lastAccessTime); + } catch (Exception e) { + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } + } + /** * Fire a pre-event for read table operation, if there are any * pre-event listeners registered diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 45b89e0ebf..217755f800 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -35,6 +35,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; @@ -490,6 +491,90 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ queryParams, pms.toArray(), queryText); } + /** + * Updates the timestamp for lastAccessTime + */ + public boolean updateLastAccessTime(final Map> partitionMap, final int lastAccessTime) + throws MetaException { + if (partitionMap.isEmpty()) { + return true; + } + + boolean success = true; + String tableIDQuery = + "select \"TBLS\".\"TBL_ID\" from \"TBLS\", \"DBS\" where \"DBS\".\"DB_ID\" = \"TBLS\".\"DB_ID\" and \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ?"; + + String queryUpdate; + String dbName; + String tblName; + String queryText; + Query query = null; + int size = 0; + Set parts; + ForwardQueryResult result = null; + JDOConnection con = null; + Statement stmt = null; + + for (String key: partitionMap.keySet()) { + dbName = key.substring(0, key.indexOf(".")); + tblName = key.substring(key.indexOf(".") + 1); + query = pm.newQuery("javax.jdo.query.SQL", tableIDQuery); + Long obj; + try { + result = (ForwardQueryResult) query.execute(tblName, dbName); + assert(result.size() == 1); + Object tableID = result.get(0); + obj = MetastoreDirectSqlUtils.extractSqlLong(tableID); + } catch (Exception e) { + LOG.error("Could not find table " + dbName + "." + tblName, e); + // nothing to do if this table cannot be found, should never happen + success = false; + result.close(); + continue; + } + + parts = partitionMap.get(key); + Iterator iter = parts.iterator(); + + while (iter.hasNext()) { + size = (batchSize > 0) ? 
batchSize : 1000; + queryUpdate = "UPDATE \"PARTITIONS\" SET \"LAST_ACCESS_TIME\" = " + lastAccessTime + " WHERE \"PARTITIONS\".\"TBL_ID\" = "; + queryUpdate += obj.longValue(); + queryUpdate += " and \"PARTITIONS\".\"PART_NAME\" IN ("; + while (iter.hasNext() && size > 0) { + queryUpdate += "'" + (String)iter.next() + "',"; + size--; + } + + if (queryUpdate.endsWith(",")) { + queryUpdate = queryUpdate.substring(0,queryUpdate.length()-1); + } + queryUpdate += ")"; + + try { + LOG.debug("Executing DirectSQL query " + queryUpdate); + con = pm.getDataStoreConnection(); + stmt = ((Connection) con.getNativeConnection()).createStatement(); + stmt.executeUpdate(queryUpdate); + } catch (Exception e) { + LOG.error("Exception occurred during update lastAccessTime for table " + tblName, e); + success = false; + continue; + } finally { + try { + if (stmt != null) { + stmt.close(); + } + } catch (SQLException e) { + throw new MetaException("Could not close statement " + e.getMessage()); + } + con.close(); // We must release the connection before we call other pm methods. + } + } + } + return success; + } + /** * Gets partitions by using direct SQL queries. * @param catName Metastore catalog name. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 61019c6a1a..00078def2c 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2642,6 +2642,23 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio return success; } + @Override + public boolean updateLastAccessTime(final Map> partitionsMap, int accessTime) + throws MetaException, NoSuchObjectException { + boolean success = false; + LOG.debug("Updating lastAccessTime on set of size:" + partitionsMap.size()); + try { + openTransaction(); + directSql.updateLastAccessTime(partitionsMap, accessTime); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + @Override public List getPartitions(String catName, String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 6a93e264f1..54a12974ac 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -28,6 +28,7 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; @@ -321,6 +322,17 @@ boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + /** + * Update the lastAccessTime on a set of partitions via single directSQL query. + * @param partitionsMap a map keyed by table names with a set of values for each table to be updated. + * @param lastAccessTime the time value, as int, to be updated to for the set. 
+ * @return boolean to indicate if the transaction was successful. + * @throws MetaException Exceptions thrown for RDBMS operations. + * @throws NoSuchObjectException when one or more of the tables are not found. + */ + public abstract boolean updateLastAccessTime(Map> partitionsMap, int lastAccessTime) + throws MetaException, NoSuchObjectException; + /** * Get some or all partitions for a table. * @param catName catalog name. diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index a5d0c046d4..ab57ee2302 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -27,6 +27,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.Stack; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -1384,6 +1385,12 @@ public void dropPartitions(String catName, String dbName, String tblName, List> partitionsMap, int accessTime) + throws MetaException, NoSuchObjectException { + return rawStore.updateLastAccessTime(partitionsMap, accessTime); + } + @Override public List getPartitions(String catName, String dbName, String tblName, int max) throws MetaException, NoSuchObjectException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f202832e0b..11374709a0 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -32,6 +32,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; @@ -267,6 +268,12 @@ public Table getTable(String catName, String dbName, String tableName, String wr return objectStore.getTable(catName, dbName, tableName, writeIdList); } + @Override + public boolean updateLastAccessTime(Map> partsMap, int lastAccessTime) + throws MetaException, NoSuchObjectException { + return objectStore.updateLastAccessTime(partsMap, lastAccessTime); + } + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 1a7ce04630..f4aa08ba75 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -31,6 +31,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -251,6 +252,12 @@ public Table 
getTable(String catalogName, String dbName, String tableName, return null; } + @Override + public boolean updateLastAccessTime(Map<String, Set<String>> partsMap, int lastAccessTime) + throws NoSuchObjectException, MetaException { + return false; + } + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 459c7c23ff..272d22dad8 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -45,6 +45,7 @@ import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -1420,6 +1421,13 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } + /** {@inheritDoc} */ + @Override + public boolean updateLastAccessTime(Map<String, Set<String>> partitionsMap, int accessTime) throws MetaException, + TException, NoSuchObjectException { + return client.update_last_access_time(partitionsMap, accessTime); + } + /** * @param name * @param dbname
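Taken together, the regenerated Thrift bindings and the client, server, RawStore, and store-implementation changes above expose a single bulk call for refreshing partition access times. The sketch below is illustrative only and is not part of the patch: it assumes an already-connected IMetaStoreClient instance named msc, and the database, table, and partition names are hypothetical placeholders.

    // Illustrative caller-side sketch (not part of the patch). Assumes an
    // already-connected IMetaStoreClient named msc; the db/table/partition
    // names below are hypothetical placeholders.
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.thrift.TException;

    public class UpdateLastAccessTimeExample {
      public static void touchPartitions(IMetaStoreClient msc) throws TException {
        // Keys are fully qualified "db.table" names; values are partition names.
        Map<String, Set<String>> partsMap = new HashMap<>();
        Set<String> parts = new HashSet<>();
        parts.add("ds=2024-01-01");           // hypothetical partition name
        parts.add("ds=2024-01-02");
        partsMap.put("default.sales", parts); // hypothetical db.table key

        // One RPC updates LAST_ACCESS_TIME for every partition in the map.
        int now = (int) (System.currentTimeMillis() / 1000L);
        msc.updateLastAccessTime(partsMap, now);
      }
    }

Grouping partition names under a fully qualified db.table key matches how MetaStoreDirectSql.updateLastAccessTime later resolves each table's TBL_ID and batches the PART_NAME IN (...) updates, so a single metastore round trip can cover many partitions across several tables.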